diff --git a/cts/scheduler/scores/a-demote-then-b-migrate.scores b/cts/scheduler/scores/a-demote-then-b-migrate.scores
index 3bcd203384..8d7b96cd61 100644
--- a/cts/scheduler/scores/a-demote-then-b-migrate.scores
+++ b/cts/scheduler/scores/a-demote-then-b-migrate.scores
@@ -1,15 +1,15 @@
 
 pcmk__clone_allocate: ms1 allocation score on node1: 0
 pcmk__clone_allocate: ms1 allocation score on node2: 0
 pcmk__clone_allocate: rsc1:0 allocation score on node1: 1
 pcmk__clone_allocate: rsc1:0 allocation score on node2: 0
 pcmk__clone_allocate: rsc1:1 allocation score on node1: 0
 pcmk__clone_allocate: rsc1:1 allocation score on node2: 1
 pcmk__native_allocate: rsc1:0 allocation score on node1: 1
-pcmk__native_allocate: rsc1:0 allocation score on node2: 0
-pcmk__native_allocate: rsc1:1 allocation score on node1: -INFINITY
+pcmk__native_allocate: rsc1:0 allocation score on node2: -INFINITY
+pcmk__native_allocate: rsc1:1 allocation score on node1: 0
 pcmk__native_allocate: rsc1:1 allocation score on node2: 1
 pcmk__native_allocate: rsc2 allocation score on node1: -INFINITY
 pcmk__native_allocate: rsc2 allocation score on node2: 0
 rsc1:0 promotion score on node1: -INFINITY
 rsc1:1 promotion score on node2: INFINITY
diff --git a/cts/scheduler/scores/bug-lf-2574.scores b/cts/scheduler/scores/bug-lf-2574.scores
index fa6acf9b41..b8a5fdc72a 100644
--- a/cts/scheduler/scores/bug-lf-2574.scores
+++ b/cts/scheduler/scores/bug-lf-2574.scores
@@ -1,52 +1,49 @@
 
 pcmk__clone_allocate: clnDummy1 allocation score on srv01: -INFINITY
 pcmk__clone_allocate: clnDummy1 allocation score on srv02: INFINITY
 pcmk__clone_allocate: clnDummy1 allocation score on srv03: 200
 pcmk__clone_allocate: clnPingd allocation score on srv01: INFINITY
 pcmk__clone_allocate: clnPingd allocation score on srv02: INFINITY
 pcmk__clone_allocate: clnPingd allocation score on srv03: 200
 pcmk__clone_allocate: prmDummy1:0 allocation score on srv01: -INFINITY
 pcmk__clone_allocate: prmDummy1:0 allocation score on srv02: 0
 pcmk__clone_allocate: prmDummy1:0 allocation score on srv03: INFINITY
 pcmk__clone_allocate: prmDummy1:1 allocation score on srv01: -INFINITY
 pcmk__clone_allocate: prmDummy1:1 allocation score on srv02: INFINITY
 pcmk__clone_allocate: prmDummy1:1 allocation score on srv03: 0
 pcmk__clone_allocate: prmDummy1:2 allocation score on srv01: -INFINITY
 pcmk__clone_allocate: prmDummy1:2 allocation score on srv02: 0
 pcmk__clone_allocate: prmDummy1:2 allocation score on srv03: 0
 pcmk__clone_allocate: prmPingd:0 allocation score on srv01: INFINITY
 pcmk__clone_allocate: prmPingd:0 allocation score on srv02: 0
 pcmk__clone_allocate: prmPingd:0 allocation score on srv03: 0
 pcmk__clone_allocate: prmPingd:1 allocation score on srv01: 0
 pcmk__clone_allocate: prmPingd:1 allocation score on srv02: 0
 pcmk__clone_allocate: prmPingd:1 allocation score on srv03: INFINITY
 pcmk__clone_allocate: prmPingd:2 allocation score on srv01: 0
 pcmk__clone_allocate: prmPingd:2 allocation score on srv02: INFINITY
 pcmk__clone_allocate: prmPingd:2 allocation score on srv03: 0
 pcmk__native_allocate: main_rsc allocation score on srv01: -INFINITY
 pcmk__native_allocate: main_rsc allocation score on srv02: 0
 pcmk__native_allocate: main_rsc allocation score on srv03: 100
 pcmk__native_allocate: main_rsc2 allocation score on srv01: -INFINITY
 pcmk__native_allocate: main_rsc2 allocation score on srv02: INFINITY
 pcmk__native_allocate: main_rsc2 allocation score on srv03: 100
 pcmk__native_allocate: prmDummy1:0 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmDummy1:0 allocation score on srv02: -INFINITY
 pcmk__native_allocate: prmDummy1:0 allocation score on srv03: INFINITY
 pcmk__native_allocate: prmDummy1:1 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmDummy1:1 allocation score on srv02: INFINITY
 pcmk__native_allocate: prmDummy1:1 allocation score on srv03: 0
 pcmk__native_allocate: prmDummy1:2 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmDummy1:2 allocation score on srv02: -INFINITY
 pcmk__native_allocate: prmDummy1:2 allocation score on srv03: -INFINITY
 pcmk__native_allocate: prmPingd:0 allocation score on srv01: -INFINITY
-pcmk__native_allocate: prmPingd:0 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmPingd:0 allocation score on srv02: -INFINITY
-pcmk__native_allocate: prmPingd:0 allocation score on srv02: 0
 pcmk__native_allocate: prmPingd:0 allocation score on srv03: -INFINITY
-pcmk__native_allocate: prmPingd:0 allocation score on srv03: 0
 pcmk__native_allocate: prmPingd:1 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmPingd:1 allocation score on srv02: -INFINITY
 pcmk__native_allocate: prmPingd:1 allocation score on srv03: INFINITY
 pcmk__native_allocate: prmPingd:2 allocation score on srv01: -INFINITY
 pcmk__native_allocate: prmPingd:2 allocation score on srv02: INFINITY
 pcmk__native_allocate: prmPingd:2 allocation score on srv03: 0
diff --git a/cts/scheduler/scores/bundle-order-stop-clone.scores b/cts/scheduler/scores/bundle-order-stop-clone.scores
index 2f2af2c64d..74f2110c4e 100644
--- a/cts/scheduler/scores/bundle-order-stop-clone.scores
+++ b/cts/scheduler/scores/bundle-order-stop-clone.scores
@@ -1,273 +1,269 @@
 
 galera:0 promotion score on galera-bundle-0: -1
 galera:1 promotion score on galera-bundle-1: -1
 galera:2 promotion score on galera-bundle-2: -1
 pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
 pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
 pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
 pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0
 pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0
 pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0
 pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
 pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
 pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: 0
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0
 pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0
 pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0
 pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
 pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
 pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
 pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
 pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
 pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
 pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
 pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-0: 0
 pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-1: 0
 pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-2: 0
 pcmk__clone_allocate: galera-bundle-master allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: galera-bundle-master allocation score on metal-2: -INFINITY
 pcmk__clone_allocate: galera-bundle-master allocation score on metal-3: -INFINITY
 pcmk__clone_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__clone_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
 pcmk__clone_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
 pcmk__clone_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
 pcmk__clone_allocate: redis-bundle-master allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: redis-bundle-master allocation score on metal-2: -INFINITY
 pcmk__clone_allocate: redis-bundle-master allocation score on metal-3: -INFINITY
 pcmk__clone_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-0: 0
 pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-1: 0
 pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-2: 0
 pcmk__clone_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
 pcmk__clone_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
 pcmk__clone_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
 pcmk__clone_allocate: storage-clone allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: storage-clone allocation score on metal-2: 0
 pcmk__clone_allocate: storage-clone allocation score on metal-3: 0
 pcmk__clone_allocate: storage-clone allocation score on rabbitmq-bundle-0: 0
 pcmk__clone_allocate: storage:0 allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: storage:0 allocation score on metal-2: 0
 pcmk__clone_allocate: storage:0 allocation score on metal-3: 0
 pcmk__clone_allocate: storage:0 allocation score on rabbitmq-bundle-0: 0
 pcmk__clone_allocate: storage:1 allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: storage:1 allocation score on metal-2: INFINITY
 pcmk__clone_allocate: storage:1 allocation score on metal-3: 0
 pcmk__clone_allocate: storage:1 allocation score on rabbitmq-bundle-0: 0
 pcmk__clone_allocate: storage:2 allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: storage:2 allocation score on metal-2: 0
 pcmk__clone_allocate: storage:2 allocation score on metal-3: INFINITY
 pcmk__clone_allocate: storage:2 allocation score on rabbitmq-bundle-0: 0
 pcmk__clone_allocate: storage:3 allocation score on metal-1: -INFINITY
 pcmk__clone_allocate: storage:3 allocation score on metal-2: 0
 pcmk__clone_allocate: storage:3 allocation score on metal-3: 0
 pcmk__clone_allocate: storage:3 allocation score on rabbitmq-bundle-0: 0
 pcmk__native_allocate: galera-bundle-0 allocation score on metal-1: INFINITY
 pcmk__native_allocate: galera-bundle-0 allocation score on metal-2: -10000
 pcmk__native_allocate: galera-bundle-0 allocation score on metal-3: -10000
 pcmk__native_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-1 allocation score on metal-1: 0
 pcmk__native_allocate: galera-bundle-1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: galera-bundle-1 allocation score on metal-3: 0
 pcmk__native_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-2 allocation score on metal-1: 0
 pcmk__native_allocate: galera-bundle-2 allocation score on metal-2: 0
 pcmk__native_allocate: galera-bundle-2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-1: -INFINITY
-pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-2: -INFINITY
-pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-2: 0
 pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-3: -INFINITY
-pcmk__native_allocate: galera-bundle-docker-0 allocation score on metal-3: 0
-pcmk__native_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: galera-bundle-docker-1 allocation score on metal-3: 0
 pcmk__native_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: galera-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
 pcmk__native_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
 pcmk__native_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
 pcmk__native_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
 pcmk__native_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
 pcmk__native_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-0 allocation score on metal-1: INFINITY
 pcmk__native_allocate: redis-bundle-0 allocation score on metal-2: 0
 pcmk__native_allocate: redis-bundle-0 allocation score on metal-3: 0
 pcmk__native_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-1 allocation score on metal-1: 0
 pcmk__native_allocate: redis-bundle-1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: redis-bundle-1 allocation score on metal-3: 0
 pcmk__native_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-2 allocation score on metal-1: 0
 pcmk__native_allocate: redis-bundle-2 allocation score on metal-2: 0
 pcmk__native_allocate: redis-bundle-2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-0 allocation score on metal-1: INFINITY
 pcmk__native_allocate: redis-bundle-docker-0 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-0 allocation score on metal-3: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-1 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: redis-bundle-docker-1 allocation score on metal-3: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-2 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-2 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: redis-bundle-docker-2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
 pcmk__native_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
 pcmk__native_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
 pcmk__native_allocate: storage:0 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: storage:0 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: storage:0 allocation score on metal-3: -INFINITY
 pcmk__native_allocate: storage:0 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: storage:1 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: storage:1 allocation score on metal-2: INFINITY
 pcmk__native_allocate: storage:1 allocation score on metal-3: 0
 pcmk__native_allocate: storage:1 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: storage:2 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: storage:2 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: storage:2 allocation score on metal-3: INFINITY
 pcmk__native_allocate: storage:2 allocation score on rabbitmq-bundle-0: -INFINITY
 pcmk__native_allocate: storage:3 allocation score on metal-1: -INFINITY
 pcmk__native_allocate: storage:3 allocation score on metal-2: -INFINITY
 pcmk__native_allocate: storage:3 allocation score on metal-3: -INFINITY
 pcmk__native_allocate: storage:3 allocation score on rabbitmq-bundle-0: -INFINITY
 redis:0 promotion score on redis-bundle-0: 99
 redis:1 promotion score on redis-bundle-1: 99
 redis:2 promotion score on redis-bundle-2: 99
diff --git a/cts/scheduler/scores/coloc-clone-stays-active.scores b/cts/scheduler/scores/coloc-clone-stays-active.scores
index 016ea63a42..f343fe4287 100644
--- a/cts/scheduler/scores/coloc-clone-stays-active.scores
+++ b/cts/scheduler/scores/coloc-clone-stays-active.scores
@@ -1,505 +1,505 @@
 
 drbd-pool-0:0 promotion score on s01-1: 10000
 drbd-pool-0:1 promotion score on s01-0: 12000
 drbd-pool-1:0 promotion score on s01-1: 12000
 drbd-pool-1:1 promotion score on s01-0: 10000
 drbd-s01-logs:0 promotion score on s01-1: 10000
 drbd-s01-logs:1 promotion score on s01-0: 10000
 drbd-s01-service:0 promotion score on s01-1: 10000
 drbd-s01-service:1 promotion score on s01-0: 10000
 drbd-s01-vm-data:0 promotion score on s01-1: 10000
 drbd-s01-vm-data:1 promotion score on s01-0: 1
 drbd-vds-dom0-stateless-0:0 promotion score on s01-1: 10000
 drbd-vds-dom0-stateless-0:1 promotion score on s01-0: 10000
 drbd-vds-http:0 promotion score on s01-1: 10000
 drbd-vds-http:1 promotion score on s01-0: 10000
 drbd-vds-tftpboot:0 promotion score on s01-1: 10000
 drbd-vds-tftpboot:1 promotion score on s01-0: 10000
 iscsi-pool-0-vips-fw:0 promotion score on s01-1: -INFINITY
 iscsi-pool-0-vips-fw:1 promotion score on s01-0: 2000
 iscsi-pool-1-vips-fw:0 promotion score on s01-1: 2000
 iscsi-pool-1-vips-fw:1 promotion score on s01-0: -INFINITY
 iscsi-vds-dom0-stateless-0-vips-fw:0 promotion score on s01-1: -INFINITY
 iscsi-vds-dom0-stateless-0-vips-fw:1 promotion score on s01-0: -INFINITY
 pcmk__clone_allocate: cl-clvmd allocation score on s01-0: 0
 pcmk__clone_allocate: cl-clvmd allocation score on s01-1: 0
 pcmk__clone_allocate: cl-dhcpd allocation score on s01-0: 0
 pcmk__clone_allocate: cl-dhcpd allocation score on s01-1: 0
 pcmk__clone_allocate: cl-dlm allocation score on s01-0: 0
 pcmk__clone_allocate: cl-dlm allocation score on s01-1: 0
 pcmk__clone_allocate: cl-drbdlinks-s01-service allocation score on s01-0: 0
 pcmk__clone_allocate: cl-drbdlinks-s01-service allocation score on s01-1: 0
 pcmk__clone_allocate: cl-gfs2 allocation score on s01-0: 0
 pcmk__clone_allocate: cl-gfs2 allocation score on s01-1: 0
 pcmk__clone_allocate: cl-ietd allocation score on s01-0: 11001
 pcmk__clone_allocate: cl-ietd allocation score on s01-1: 1000
 pcmk__clone_allocate: cl-libvirtd allocation score on s01-0: 0
 pcmk__clone_allocate: cl-libvirtd allocation score on s01-1: 0
 pcmk__clone_allocate: cl-o2cb allocation score on s01-0: 0
 pcmk__clone_allocate: cl-o2cb allocation score on s01-1: 0
 pcmk__clone_allocate: cl-ospf-routing allocation score on s01-0: 0
 pcmk__clone_allocate: cl-ospf-routing allocation score on s01-1: 0
 pcmk__clone_allocate: cl-s01-logs-fs allocation score on s01-0: 0
 pcmk__clone_allocate: cl-s01-logs-fs allocation score on s01-1: 0
 pcmk__clone_allocate: cl-s01-service-fs allocation score on s01-0: 0
 pcmk__clone_allocate: cl-s01-service-fs allocation score on s01-1: 0
 pcmk__clone_allocate: cl-s01-vm-data-metadata-fs allocation score on s01-0: 0
 pcmk__clone_allocate: cl-s01-vm-data-metadata-fs allocation score on s01-1: 0
 pcmk__clone_allocate: cl-s01-vm-data-storage-pool allocation score on s01-0: 0
 pcmk__clone_allocate: cl-s01-vm-data-storage-pool allocation score on s01-1: 0
 pcmk__clone_allocate: cl-vds-http-fs allocation score on s01-0: 0
 pcmk__clone_allocate: cl-vds-http-fs allocation score on s01-1: 0
 pcmk__clone_allocate: cl-vds-tftpboot-fs allocation score on s01-0: 0
 pcmk__clone_allocate: cl-vds-tftpboot-fs allocation score on s01-1: 0
 pcmk__clone_allocate: cl-vg-s01-vm-data allocation score on s01-0: 0
 pcmk__clone_allocate: cl-vg-s01-vm-data allocation score on s01-1: 0
 pcmk__clone_allocate: cl-xinetd allocation score on s01-0: 0
 pcmk__clone_allocate: cl-xinetd allocation score on s01-1: 0
 pcmk__clone_allocate: clvmd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: clvmd:0 allocation score on s01-1: 1
 pcmk__clone_allocate: clvmd:1 allocation score on s01-0: 1
 pcmk__clone_allocate: clvmd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: connected-outer allocation score on s01-0: 0
 pcmk__clone_allocate: connected-outer allocation score on s01-1: 0
 pcmk__clone_allocate: dhcpd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: dhcpd:0 allocation score on s01-1: 0
 pcmk__clone_allocate: dhcpd:1 allocation score on s01-0: 0
 pcmk__clone_allocate: dhcpd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: dlm:0 allocation score on s01-0: 0
 pcmk__clone_allocate: dlm:0 allocation score on s01-1: 1
 pcmk__clone_allocate: dlm:1 allocation score on s01-0: 1
 pcmk__clone_allocate: dlm:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-pool-0:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-pool-0:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-pool-0:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-pool-0:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-pool-1:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-pool-1:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-pool-1:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-pool-1:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-s01-logs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-s01-logs:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-s01-logs:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-s01-logs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-s01-service:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-s01-service:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-s01-service:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-s01-service:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-s01-vm-data:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-s01-vm-data:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-s01-vm-data:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-s01-vm-data:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-vds-dom0-stateless-0:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-vds-dom0-stateless-0:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-vds-dom0-stateless-0:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-vds-dom0-stateless-0:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-vds-http:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-vds-http:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-vds-http:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-vds-http:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbd-vds-tftpboot:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbd-vds-tftpboot:0 allocation score on s01-1: 10001
 pcmk__clone_allocate: drbd-vds-tftpboot:1 allocation score on s01-0: 10001
 pcmk__clone_allocate: drbd-vds-tftpboot:1 allocation score on s01-1: 0
 pcmk__clone_allocate: drbdlinks-s01-service:0 allocation score on s01-0: 0
 pcmk__clone_allocate: drbdlinks-s01-service:0 allocation score on s01-1: 1
 pcmk__clone_allocate: drbdlinks-s01-service:1 allocation score on s01-0: 1
 pcmk__clone_allocate: drbdlinks-s01-service:1 allocation score on s01-1: 0
 pcmk__clone_allocate: gfs2:0 allocation score on s01-0: 0
 pcmk__clone_allocate: gfs2:0 allocation score on s01-1: 1
 pcmk__clone_allocate: gfs2:1 allocation score on s01-0: 1
 pcmk__clone_allocate: gfs2:1 allocation score on s01-1: 0
 pcmk__clone_allocate: ietd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: ietd:0 allocation score on s01-1: 1
 pcmk__clone_allocate: ietd:1 allocation score on s01-0: 1
 pcmk__clone_allocate: ietd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: iscsi-pool-0-vips-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: iscsi-pool-0-vips-fw:0 allocation score on s01-1: 2000
 pcmk__clone_allocate: iscsi-pool-0-vips-fw:1 allocation score on s01-0: 2000
 pcmk__clone_allocate: iscsi-pool-0-vips-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: iscsi-pool-1-vips-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: iscsi-pool-1-vips-fw:0 allocation score on s01-1: 2000
 pcmk__clone_allocate: iscsi-pool-1-vips-fw:1 allocation score on s01-0: 2000
 pcmk__clone_allocate: iscsi-pool-1-vips-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: iscsi-vds-dom0-stateless-0-vips-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: iscsi-vds-dom0-stateless-0-vips-fw:0 allocation score on s01-1: 2000
 pcmk__clone_allocate: iscsi-vds-dom0-stateless-0-vips-fw:1 allocation score on s01-0: 2000
 pcmk__clone_allocate: iscsi-vds-dom0-stateless-0-vips-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: libvirtd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: libvirtd:0 allocation score on s01-1: 1
 pcmk__clone_allocate: libvirtd:1 allocation score on s01-0: 1
 pcmk__clone_allocate: libvirtd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-pool-0 allocation score on s01-0: 1000
 pcmk__clone_allocate: ms-drbd-pool-0 allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-pool-1 allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-pool-1 allocation score on s01-1: 1000
 pcmk__clone_allocate: ms-drbd-s01-logs allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-s01-logs allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-s01-service allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-s01-service allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-s01-vm-data allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-s01-vm-data allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-vds-dom0-stateless-0 allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-vds-dom0-stateless-0 allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-vds-http allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-vds-http allocation score on s01-1: 0
 pcmk__clone_allocate: ms-drbd-vds-tftpboot allocation score on s01-0: 0
 pcmk__clone_allocate: ms-drbd-vds-tftpboot allocation score on s01-1: 0
 pcmk__clone_allocate: ms-iscsi-pool-0-vips-fw allocation score on s01-0: 0
 pcmk__clone_allocate: ms-iscsi-pool-0-vips-fw allocation score on s01-1: 0
 pcmk__clone_allocate: ms-iscsi-pool-1-vips-fw allocation score on s01-0: 0
 pcmk__clone_allocate: ms-iscsi-pool-1-vips-fw allocation score on s01-1: 0
 pcmk__clone_allocate: ms-iscsi-vds-dom0-stateless-0-vips-fw allocation score on s01-0: 0
 pcmk__clone_allocate: ms-iscsi-vds-dom0-stateless-0-vips-fw allocation score on s01-1: 0
 pcmk__clone_allocate: o2cb:0 allocation score on s01-0: 0
 pcmk__clone_allocate: o2cb:0 allocation score on s01-1: 0
 pcmk__clone_allocate: o2cb:1 allocation score on s01-0: 0
 pcmk__clone_allocate: o2cb:1 allocation score on s01-1: 0
 pcmk__clone_allocate: ospf-routing:0 allocation score on s01-0: 0
 pcmk__clone_allocate: ospf-routing:0 allocation score on s01-1: 0
 pcmk__clone_allocate: ospf-routing:1 allocation score on s01-0: 0
 pcmk__clone_allocate: ospf-routing:1 allocation score on s01-1: 0
 pcmk__clone_allocate: ospfd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: ospfd:0 allocation score on s01-1: 1
 pcmk__clone_allocate: ospfd:1 allocation score on s01-0: 1
 pcmk__clone_allocate: ospfd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: ping-bmc-and-switch:0 allocation score on s01-0: 0
 pcmk__clone_allocate: ping-bmc-and-switch:0 allocation score on s01-1: 1
 pcmk__clone_allocate: ping-bmc-and-switch:1 allocation score on s01-0: 1
 pcmk__clone_allocate: ping-bmc-and-switch:1 allocation score on s01-1: 0
 pcmk__clone_allocate: s01-logs-fs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: s01-logs-fs:0 allocation score on s01-1: 1
 pcmk__clone_allocate: s01-logs-fs:1 allocation score on s01-0: 1
 pcmk__clone_allocate: s01-logs-fs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: s01-service-fs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: s01-service-fs:0 allocation score on s01-1: 1
 pcmk__clone_allocate: s01-service-fs:1 allocation score on s01-0: 1
 pcmk__clone_allocate: s01-service-fs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: s01-vm-data-metadata-fs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: s01-vm-data-metadata-fs:0 allocation score on s01-1: 1
 pcmk__clone_allocate: s01-vm-data-metadata-fs:1 allocation score on s01-0: 1
 pcmk__clone_allocate: s01-vm-data-metadata-fs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: s01-vm-data-storage-pool:0 allocation score on s01-0: 0
 pcmk__clone_allocate: s01-vm-data-storage-pool:0 allocation score on s01-1: 1
 pcmk__clone_allocate: s01-vm-data-storage-pool:1 allocation score on s01-0: 1
 pcmk__clone_allocate: s01-vm-data-storage-pool:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vds-http-fs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vds-http-fs:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vds-http-fs:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vds-http-fs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vds-tftpboot-fs:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vds-tftpboot-fs:0 allocation score on s01-1: 0
 pcmk__clone_allocate: vds-tftpboot-fs:1 allocation score on s01-0: 0
 pcmk__clone_allocate: vds-tftpboot-fs:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vg-s01-vm-data:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vg-s01-vm-data:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vg-s01-vm-data:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vg-s01-vm-data:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-227-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-227-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-227-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-227-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-228-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-228-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-228-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-228-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-235-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-235-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-235-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-235-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-236-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-236-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-236-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-236-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-237-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-237-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-237-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-237-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: vip-238-fw:0 allocation score on s01-0: 0
 pcmk__clone_allocate: vip-238-fw:0 allocation score on s01-1: 1
 pcmk__clone_allocate: vip-238-fw:1 allocation score on s01-0: 1
 pcmk__clone_allocate: vip-238-fw:1 allocation score on s01-1: 0
 pcmk__clone_allocate: xinetd:0 allocation score on s01-0: 0
 pcmk__clone_allocate: xinetd:0 allocation score on s01-1: 1
 pcmk__clone_allocate: xinetd:1 allocation score on s01-0: 1
 pcmk__clone_allocate: xinetd:1 allocation score on s01-1: 0
 pcmk__clone_allocate: zebra:0 allocation score on s01-0: 0
 pcmk__clone_allocate: zebra:0 allocation score on s01-1: 1
 pcmk__clone_allocate: zebra:1 allocation score on s01-0: 1
 pcmk__clone_allocate: zebra:1 allocation score on s01-1: 0
 pcmk__group_allocate: http-server allocation score on s01-0: 0
 pcmk__group_allocate: http-server allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-0-lun-1 allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-0-lun-1 allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-0-target allocation score on s01-0: 1000
 pcmk__group_allocate: iscsi-pool-0-target allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-0-target-all allocation score on s01-0: 1000
 pcmk__group_allocate: iscsi-pool-0-target-all allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-0-vips allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-0-vips allocation score on s01-1: 0
-pcmk__group_allocate: iscsi-pool-0-vips-fw:0 allocation score on s01-0: 0
+pcmk__group_allocate: iscsi-pool-0-vips-fw:0 allocation score on s01-0: -INFINITY
 pcmk__group_allocate: iscsi-pool-0-vips-fw:0 allocation score on s01-1: 2000
 pcmk__group_allocate: iscsi-pool-0-vips-fw:1 allocation score on s01-0: 2000
-pcmk__group_allocate: iscsi-pool-0-vips-fw:1 allocation score on s01-1: -INFINITY
+pcmk__group_allocate: iscsi-pool-0-vips-fw:1 allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-1-lun-1 allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-1-lun-1 allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-1-target allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-1-target allocation score on s01-1: 1000
 pcmk__group_allocate: iscsi-pool-1-target-all allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-1-target-all allocation score on s01-1: 1000
 pcmk__group_allocate: iscsi-pool-1-vips allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-1-vips allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-pool-1-vips-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-pool-1-vips-fw:0 allocation score on s01-1: 2000
 pcmk__group_allocate: iscsi-pool-1-vips-fw:1 allocation score on s01-0: 2000
 pcmk__group_allocate: iscsi-pool-1-vips-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-target allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-target allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-target-all allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-target-all allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips allocation score on s01-1: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips-fw:0 allocation score on s01-1: 2000
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips-fw:1 allocation score on s01-0: 2000
 pcmk__group_allocate: iscsi-vds-dom0-stateless-0-vips-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: nginx allocation score on s01-0: 0
 pcmk__group_allocate: nginx allocation score on s01-1: 0
 pcmk__group_allocate: ospf-routing:0 allocation score on s01-0: 0
 pcmk__group_allocate: ospf-routing:0 allocation score on s01-1: 0
 pcmk__group_allocate: ospf-routing:1 allocation score on s01-0: 0
 pcmk__group_allocate: ospf-routing:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: ospfd:0 allocation score on s01-0: 0
 pcmk__group_allocate: ospfd:0 allocation score on s01-1: 1
 pcmk__group_allocate: ospfd:1 allocation score on s01-0: 1
 pcmk__group_allocate: ospfd:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: syslog-ng allocation score on s01-0: 0
 pcmk__group_allocate: syslog-ng allocation score on s01-1: 0
 pcmk__group_allocate: syslog-server allocation score on s01-0: 0
 pcmk__group_allocate: syslog-server allocation score on s01-1: 0
 pcmk__group_allocate: tftp-server allocation score on s01-0: 0
 pcmk__group_allocate: tftp-server allocation score on s01-1: 0
 pcmk__group_allocate: tftpd allocation score on s01-0: 0
 pcmk__group_allocate: tftpd allocation score on s01-1: 0
 pcmk__group_allocate: vip-227 allocation score on s01-0: 0
 pcmk__group_allocate: vip-227 allocation score on s01-1: 0
 pcmk__group_allocate: vip-227-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: vip-227-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-227-fw:1 allocation score on s01-0: 1
 pcmk__group_allocate: vip-227-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: vip-228 allocation score on s01-0: 0
 pcmk__group_allocate: vip-228 allocation score on s01-1: 0
 pcmk__group_allocate: vip-228-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: vip-228-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-228-fw:1 allocation score on s01-0: 1
 pcmk__group_allocate: vip-228-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: vip-232 allocation score on s01-0: 0
 pcmk__group_allocate: vip-232 allocation score on s01-1: 0
 pcmk__group_allocate: vip-233 allocation score on s01-0: 0
 pcmk__group_allocate: vip-233 allocation score on s01-1: 0
 pcmk__group_allocate: vip-234 allocation score on s01-0: 0
 pcmk__group_allocate: vip-234 allocation score on s01-1: 0
 pcmk__group_allocate: vip-235 allocation score on s01-0: 0
 pcmk__group_allocate: vip-235 allocation score on s01-1: 0
-pcmk__group_allocate: vip-235-fw:0 allocation score on s01-0: 0
+pcmk__group_allocate: vip-235-fw:0 allocation score on s01-0: -INFINITY
 pcmk__group_allocate: vip-235-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-235-fw:1 allocation score on s01-0: 1
-pcmk__group_allocate: vip-235-fw:1 allocation score on s01-1: -INFINITY
+pcmk__group_allocate: vip-235-fw:1 allocation score on s01-1: 0
 pcmk__group_allocate: vip-236 allocation score on s01-0: 0
 pcmk__group_allocate: vip-236 allocation score on s01-1: 0
-pcmk__group_allocate: vip-236-fw:0 allocation score on s01-0: 0
+pcmk__group_allocate: vip-236-fw:0 allocation score on s01-0: -INFINITY
 pcmk__group_allocate: vip-236-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-236-fw:1 allocation score on s01-0: 1
-pcmk__group_allocate: vip-236-fw:1 allocation score on s01-1: -INFINITY
+pcmk__group_allocate: vip-236-fw:1 allocation score on s01-1: 0
 pcmk__group_allocate: vip-237 allocation score on s01-0: 0
 pcmk__group_allocate: vip-237 allocation score on s01-1: 0
 pcmk__group_allocate: vip-237-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: vip-237-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-237-fw:1 allocation score on s01-0: 1
 pcmk__group_allocate: vip-237-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: vip-238 allocation score on s01-0: 0
 pcmk__group_allocate: vip-238 allocation score on s01-1: 0
 pcmk__group_allocate: vip-238-fw:0 allocation score on s01-0: 0
 pcmk__group_allocate: vip-238-fw:0 allocation score on s01-1: 1
 pcmk__group_allocate: vip-238-fw:1 allocation score on s01-0: 1
 pcmk__group_allocate: vip-238-fw:1 allocation score on s01-1: -INFINITY
 pcmk__group_allocate: zebra:0 allocation score on s01-0: 0
 pcmk__group_allocate: zebra:0 allocation score on s01-1: 1
 pcmk__group_allocate: zebra:1 allocation score on s01-0: 1
 pcmk__group_allocate: zebra:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: clvmd:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: clvmd:0 allocation score on s01-1: 1
 pcmk__native_allocate: clvmd:1 allocation score on s01-0: 1
 pcmk__native_allocate: clvmd:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: dhcpd:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: dhcpd:0 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: dhcpd:1 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: dhcpd:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: dlm:0 allocation score on s01-0: 0
 pcmk__native_allocate: dlm:0 allocation score on s01-1: 1
 pcmk__native_allocate: dlm:1 allocation score on s01-0: 1
 pcmk__native_allocate: dlm:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-pool-0:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: drbd-pool-0:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-pool-0:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-pool-0:1 allocation score on s01-1: 0
 pcmk__native_allocate: drbd-pool-1:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-pool-1:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-pool-1:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-pool-1:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-s01-logs:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-s01-logs:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-s01-logs:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-s01-logs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-s01-service:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-s01-service:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-s01-service:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-s01-service:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-s01-vm-data:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-s01-vm-data:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-s01-vm-data:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-s01-vm-data:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-vds-dom0-stateless-0:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-vds-dom0-stateless-0:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-vds-dom0-stateless-0:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-vds-dom0-stateless-0:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-vds-http:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-vds-http:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-vds-http:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-vds-http:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbd-vds-tftpboot:0 allocation score on s01-0: 0
 pcmk__native_allocate: drbd-vds-tftpboot:0 allocation score on s01-1: 10001
 pcmk__native_allocate: drbd-vds-tftpboot:1 allocation score on s01-0: 10001
 pcmk__native_allocate: drbd-vds-tftpboot:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: drbdlinks-s01-service:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: drbdlinks-s01-service:0 allocation score on s01-1: 1
 pcmk__native_allocate: drbdlinks-s01-service:1 allocation score on s01-0: 1
 pcmk__native_allocate: drbdlinks-s01-service:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: gfs2:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: gfs2:0 allocation score on s01-1: 1
 pcmk__native_allocate: gfs2:1 allocation score on s01-0: 1
 pcmk__native_allocate: gfs2:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: ietd:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: ietd:0 allocation score on s01-1: 1
 pcmk__native_allocate: ietd:1 allocation score on s01-0: 1
 pcmk__native_allocate: ietd:1 allocation score on s01-1: 0
 pcmk__native_allocate: iscsi-pool-0-lun-1 allocation score on s01-0: 0
 pcmk__native_allocate: iscsi-pool-0-lun-1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: iscsi-pool-0-target allocation score on s01-0: 11001
 pcmk__native_allocate: iscsi-pool-0-target allocation score on s01-1: -INFINITY
 pcmk__native_allocate: iscsi-pool-1-lun-1 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: iscsi-pool-1-lun-1 allocation score on s01-1: 0
 pcmk__native_allocate: iscsi-pool-1-target allocation score on s01-0: -INFINITY
 pcmk__native_allocate: iscsi-pool-1-target allocation score on s01-1: 11001
 pcmk__native_allocate: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: iscsi-vds-dom0-stateless-0-target allocation score on s01-0: -INFINITY
 pcmk__native_allocate: iscsi-vds-dom0-stateless-0-target allocation score on s01-1: -INFINITY
 pcmk__native_allocate: libvirtd:0 allocation score on s01-0: 0
 pcmk__native_allocate: libvirtd:0 allocation score on s01-1: 1
 pcmk__native_allocate: libvirtd:1 allocation score on s01-0: 1
 pcmk__native_allocate: libvirtd:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: mgmt-vm allocation score on s01-0: -INFINITY
 pcmk__native_allocate: mgmt-vm allocation score on s01-1: 0
 pcmk__native_allocate: nginx allocation score on s01-0: -INFINITY
 pcmk__native_allocate: nginx allocation score on s01-1: -INFINITY
 pcmk__native_allocate: o2cb:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: o2cb:0 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: o2cb:1 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: o2cb:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: ospfd:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: ospfd:0 allocation score on s01-1: 1
 pcmk__native_allocate: ospfd:1 allocation score on s01-0: 1
 pcmk__native_allocate: ospfd:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: ping-bmc-and-switch:0 allocation score on s01-0: 0
 pcmk__native_allocate: ping-bmc-and-switch:0 allocation score on s01-1: 1
 pcmk__native_allocate: ping-bmc-and-switch:1 allocation score on s01-0: 1
 pcmk__native_allocate: ping-bmc-and-switch:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: s01-logs-fs:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: s01-logs-fs:0 allocation score on s01-1: 10002
 pcmk__native_allocate: s01-logs-fs:1 allocation score on s01-0: 10002
 pcmk__native_allocate: s01-logs-fs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: s01-service-fs:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: s01-service-fs:0 allocation score on s01-1: 10002
 pcmk__native_allocate: s01-service-fs:1 allocation score on s01-0: 10002
 pcmk__native_allocate: s01-service-fs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: s01-vm-data-metadata-fs:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: s01-vm-data-metadata-fs:0 allocation score on s01-1: 1
 pcmk__native_allocate: s01-vm-data-metadata-fs:1 allocation score on s01-0: 1
 pcmk__native_allocate: s01-vm-data-metadata-fs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: s01-vm-data-storage-pool:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: s01-vm-data-storage-pool:0 allocation score on s01-1: 1
 pcmk__native_allocate: s01-vm-data-storage-pool:1 allocation score on s01-0: 1
 pcmk__native_allocate: s01-vm-data-storage-pool:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: stonith-s01-0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: stonith-s01-0 allocation score on s01-1: 0
 pcmk__native_allocate: stonith-s01-1 allocation score on s01-0: 0
 pcmk__native_allocate: stonith-s01-1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: syslog-ng allocation score on s01-0: -INFINITY
 pcmk__native_allocate: syslog-ng allocation score on s01-1: 0
 pcmk__native_allocate: tftpd allocation score on s01-0: -INFINITY
 pcmk__native_allocate: tftpd allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vds-http-fs:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vds-http-fs:0 allocation score on s01-1: 10002
 pcmk__native_allocate: vds-http-fs:1 allocation score on s01-0: 10002
 pcmk__native_allocate: vds-http-fs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vds-tftpboot-fs:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vds-tftpboot-fs:0 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vds-tftpboot-fs:1 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vds-tftpboot-fs:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vg-s01-vm-data:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vg-s01-vm-data:0 allocation score on s01-1: 10002
 pcmk__native_allocate: vg-s01-vm-data:1 allocation score on s01-0: 10002
 pcmk__native_allocate: vg-s01-vm-data:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-227 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-227 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-227-fw:0 allocation score on s01-0: 0
 pcmk__native_allocate: vip-227-fw:0 allocation score on s01-1: 2
 pcmk__native_allocate: vip-227-fw:1 allocation score on s01-0: 2
 pcmk__native_allocate: vip-227-fw:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-228 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-228 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-228-fw:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-228-fw:0 allocation score on s01-1: 1
 pcmk__native_allocate: vip-228-fw:1 allocation score on s01-0: 1
 pcmk__native_allocate: vip-228-fw:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-232 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-232 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-233 allocation score on s01-0: 0
 pcmk__native_allocate: vip-233 allocation score on s01-1: 0
 pcmk__native_allocate: vip-234 allocation score on s01-0: 0
 pcmk__native_allocate: vip-234 allocation score on s01-1: 0
 pcmk__native_allocate: vip-235 allocation score on s01-0: 0
 pcmk__native_allocate: vip-235 allocation score on s01-1: -INFINITY
-pcmk__native_allocate: vip-235-fw:0 allocation score on s01-0: 0
+pcmk__native_allocate: vip-235-fw:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-235-fw:0 allocation score on s01-1: 2
 pcmk__native_allocate: vip-235-fw:1 allocation score on s01-0: 2
-pcmk__native_allocate: vip-235-fw:1 allocation score on s01-1: -INFINITY
+pcmk__native_allocate: vip-235-fw:1 allocation score on s01-1: 0
 pcmk__native_allocate: vip-236 allocation score on s01-0: 0
 pcmk__native_allocate: vip-236 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-236-fw:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-236-fw:0 allocation score on s01-1: 1
 pcmk__native_allocate: vip-236-fw:1 allocation score on s01-0: 1
 pcmk__native_allocate: vip-236-fw:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-237 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-237 allocation score on s01-1: 0
 pcmk__native_allocate: vip-237-fw:0 allocation score on s01-0: 0
 pcmk__native_allocate: vip-237-fw:0 allocation score on s01-1: 2
 pcmk__native_allocate: vip-237-fw:1 allocation score on s01-0: 2
 pcmk__native_allocate: vip-237-fw:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: vip-238 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-238 allocation score on s01-1: 0
 pcmk__native_allocate: vip-238-fw:0 allocation score on s01-0: -INFINITY
 pcmk__native_allocate: vip-238-fw:0 allocation score on s01-1: 1
 pcmk__native_allocate: vip-238-fw:1 allocation score on s01-0: 1
 pcmk__native_allocate: vip-238-fw:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: xinetd:0 allocation score on s01-0: 0
 pcmk__native_allocate: xinetd:0 allocation score on s01-1: 1
 pcmk__native_allocate: xinetd:1 allocation score on s01-0: 1
 pcmk__native_allocate: xinetd:1 allocation score on s01-1: -INFINITY
 pcmk__native_allocate: zebra:0 allocation score on s01-0: 0
 pcmk__native_allocate: zebra:0 allocation score on s01-1: 2
 pcmk__native_allocate: zebra:1 allocation score on s01-0: 2
 pcmk__native_allocate: zebra:1 allocation score on s01-1: -INFINITY
diff --git a/cts/scheduler/scores/promoted-partially-demoted-group.scores b/cts/scheduler/scores/promoted-partially-demoted-group.scores
index 827bedab31..cf3d17fa70 100644
--- a/cts/scheduler/scores/promoted-partially-demoted-group.scores
+++ b/cts/scheduler/scores/promoted-partially-demoted-group.scores
@@ -1,93 +1,93 @@
 
 cdev-pool-0-drbd:0 promotion score on sd01-1: 10800
 cdev-pool-0-drbd:1 promotion score on sd01-0: INFINITY
 cdev-pool-0-iscsi-vips-fw:0 promotion score on sd01-1: -INFINITY
 cdev-pool-0-iscsi-vips-fw:1 promotion score on sd01-0: 2000
 pcmk__clone_allocate: cdev-pool-0-drbd:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: cdev-pool-0-drbd:0 allocation score on sd01-1: 10100
 pcmk__clone_allocate: cdev-pool-0-drbd:1 allocation score on sd01-0: 10100
 pcmk__clone_allocate: cdev-pool-0-drbd:1 allocation score on sd01-1: 0
 pcmk__clone_allocate: cdev-pool-0-iscsi-vips-fw:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: cdev-pool-0-iscsi-vips-fw:0 allocation score on sd01-1: 2000
 pcmk__clone_allocate: cdev-pool-0-iscsi-vips-fw:1 allocation score on sd01-0: 2000
 pcmk__clone_allocate: cdev-pool-0-iscsi-vips-fw:1 allocation score on sd01-1: 0
 pcmk__clone_allocate: cl-ietd allocation score on sd01-0: INFINITY
 pcmk__clone_allocate: cl-ietd allocation score on sd01-1: 300
 pcmk__clone_allocate: cl-vlan1-net allocation score on sd01-0: 0
 pcmk__clone_allocate: cl-vlan1-net allocation score on sd01-1: 0
 pcmk__clone_allocate: ietd:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: ietd:0 allocation score on sd01-1: 100
 pcmk__clone_allocate: ietd:1 allocation score on sd01-0: 100
 pcmk__clone_allocate: ietd:1 allocation score on sd01-1: 0
 pcmk__clone_allocate: ms-cdev-pool-0-drbd allocation score on sd01-0: INFINITY
 pcmk__clone_allocate: ms-cdev-pool-0-drbd allocation score on sd01-1: 400
 pcmk__clone_allocate: ms-cdev-pool-0-iscsi-vips-fw allocation score on sd01-0: 0
 pcmk__clone_allocate: ms-cdev-pool-0-iscsi-vips-fw allocation score on sd01-1: 0
 pcmk__clone_allocate: vip-164-fw:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: vip-164-fw:0 allocation score on sd01-1: 100
 pcmk__clone_allocate: vip-164-fw:1 allocation score on sd01-0: 100
 pcmk__clone_allocate: vip-164-fw:1 allocation score on sd01-1: 0
 pcmk__clone_allocate: vip-165-fw:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: vip-165-fw:0 allocation score on sd01-1: 100
 pcmk__clone_allocate: vip-165-fw:1 allocation score on sd01-0: 100
 pcmk__clone_allocate: vip-165-fw:1 allocation score on sd01-1: 0
 pcmk__clone_allocate: vlan1-net:0 allocation score on sd01-0: 0
 pcmk__clone_allocate: vlan1-net:0 allocation score on sd01-1: 100
 pcmk__clone_allocate: vlan1-net:1 allocation score on sd01-0: 100
 pcmk__clone_allocate: vlan1-net:1 allocation score on sd01-1: 0
 pcmk__group_allocate: cdev-pool-0-iscsi-export allocation score on sd01-0: INFINITY
 pcmk__group_allocate: cdev-pool-0-iscsi-export allocation score on sd01-1: 0
 pcmk__group_allocate: cdev-pool-0-iscsi-lun-1 allocation score on sd01-0: 0
 pcmk__group_allocate: cdev-pool-0-iscsi-lun-1 allocation score on sd01-1: 100
 pcmk__group_allocate: cdev-pool-0-iscsi-target allocation score on sd01-0: INFINITY
 pcmk__group_allocate: cdev-pool-0-iscsi-target allocation score on sd01-1: 100
 pcmk__group_allocate: cdev-pool-0-iscsi-vips allocation score on sd01-0: 0
 pcmk__group_allocate: cdev-pool-0-iscsi-vips allocation score on sd01-1: 0
-pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:0 allocation score on sd01-0: 0
+pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:0 allocation score on sd01-0: -INFINITY
 pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:0 allocation score on sd01-1: 2000
 pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:1 allocation score on sd01-0: 2000
-pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:1 allocation score on sd01-1: -INFINITY
+pcmk__group_allocate: cdev-pool-0-iscsi-vips-fw:1 allocation score on sd01-1: 0
 pcmk__group_allocate: vip-164 allocation score on sd01-0: 0
 pcmk__group_allocate: vip-164 allocation score on sd01-1: 100
-pcmk__group_allocate: vip-164-fw:0 allocation score on sd01-0: 0
+pcmk__group_allocate: vip-164-fw:0 allocation score on sd01-0: -INFINITY
 pcmk__group_allocate: vip-164-fw:0 allocation score on sd01-1: 100
 pcmk__group_allocate: vip-164-fw:1 allocation score on sd01-0: 100
-pcmk__group_allocate: vip-164-fw:1 allocation score on sd01-1: -INFINITY
+pcmk__group_allocate: vip-164-fw:1 allocation score on sd01-1: 0
 pcmk__group_allocate: vip-165 allocation score on sd01-0: 0
 pcmk__group_allocate: vip-165 allocation score on sd01-1: 100
-pcmk__group_allocate: vip-165-fw:0 allocation score on sd01-0: 0
+pcmk__group_allocate: vip-165-fw:0 allocation score on sd01-0: -INFINITY
 pcmk__group_allocate: vip-165-fw:0 allocation score on sd01-1: 100
 pcmk__group_allocate: vip-165-fw:1 allocation score on sd01-0: 100
-pcmk__group_allocate: vip-165-fw:1 allocation score on sd01-1: -INFINITY
+pcmk__group_allocate: vip-165-fw:1 allocation score on sd01-1: 0
 pcmk__native_allocate: cdev-pool-0-drbd:0 allocation score on sd01-0: -INFINITY
 pcmk__native_allocate: cdev-pool-0-drbd:0 allocation score on sd01-1: 10100
 pcmk__native_allocate: cdev-pool-0-drbd:1 allocation score on sd01-0: 10100
 pcmk__native_allocate: cdev-pool-0-drbd:1 allocation score on sd01-1: 0
 pcmk__native_allocate: cdev-pool-0-iscsi-lun-1 allocation score on sd01-0: 0
 pcmk__native_allocate: cdev-pool-0-iscsi-lun-1 allocation score on sd01-1: -INFINITY
 pcmk__native_allocate: cdev-pool-0-iscsi-target allocation score on sd01-0: INFINITY
 pcmk__native_allocate: cdev-pool-0-iscsi-target allocation score on sd01-1: -INFINITY
 pcmk__native_allocate: ietd:0 allocation score on sd01-0: -INFINITY
 pcmk__native_allocate: ietd:0 allocation score on sd01-1: 100
 pcmk__native_allocate: ietd:1 allocation score on sd01-0: 100
 pcmk__native_allocate: ietd:1 allocation score on sd01-1: 0
 pcmk__native_allocate: stonith-xvm-sd01-0 allocation score on sd01-0: -INFINITY
 pcmk__native_allocate: stonith-xvm-sd01-0 allocation score on sd01-1: 100
 pcmk__native_allocate: stonith-xvm-sd01-1 allocation score on sd01-0: 100
 pcmk__native_allocate: stonith-xvm-sd01-1 allocation score on sd01-1: -INFINITY
 pcmk__native_allocate: vip-164 allocation score on sd01-0: 0
 pcmk__native_allocate: vip-164 allocation score on sd01-1: -INFINITY
-pcmk__native_allocate: vip-164-fw:0 allocation score on sd01-0: 0
+pcmk__native_allocate: vip-164-fw:0 allocation score on sd01-0: -INFINITY
 pcmk__native_allocate: vip-164-fw:0 allocation score on sd01-1: 200
 pcmk__native_allocate: vip-164-fw:1 allocation score on sd01-0: 200
-pcmk__native_allocate: vip-164-fw:1 allocation score on sd01-1: -INFINITY
+pcmk__native_allocate: vip-164-fw:1 allocation score on sd01-1: 0
 pcmk__native_allocate: vip-165 allocation score on sd01-0: 0
 pcmk__native_allocate: vip-165 allocation score on sd01-1: -INFINITY
 pcmk__native_allocate: vip-165-fw:0 allocation score on sd01-0: -INFINITY
 pcmk__native_allocate: vip-165-fw:0 allocation score on sd01-1: 100
 pcmk__native_allocate: vip-165-fw:1 allocation score on sd01-0: 100
 pcmk__native_allocate: vip-165-fw:1 allocation score on sd01-1: -INFINITY
 pcmk__native_allocate: vlan1-net:0 allocation score on sd01-0: 0
 pcmk__native_allocate: vlan1-net:0 allocation score on sd01-1: 100
 pcmk__native_allocate: vlan1-net:1 allocation score on sd01-0: 100
 pcmk__native_allocate: vlan1-net:1 allocation score on sd01-1: -INFINITY
diff --git a/cts/scheduler/scores/rec-node-13.scores b/cts/scheduler/scores/rec-node-13.scores
index c6b890b4ee..0d31e79966 100644
--- a/cts/scheduler/scores/rec-node-13.scores
+++ b/cts/scheduler/scores/rec-node-13.scores
@@ -1,331 +1,331 @@
 
 ocf_msdummy:0 promotion score on c001n02: 1000
 ocf_msdummy:1 promotion score on none: 0
 ocf_msdummy:10 promotion score on c001n06: -1
 ocf_msdummy:11 promotion score on c001n07: -1
 ocf_msdummy:2 promotion score on none: 0
 ocf_msdummy:3 promotion score on none: 0
 ocf_msdummy:4 promotion score on c001n02: -1
 ocf_msdummy:5 promotion score on none: 0
 ocf_msdummy:6 promotion score on none: 0
 ocf_msdummy:7 promotion score on none: 0
 ocf_msdummy:8 promotion score on c001n06: -1
 ocf_msdummy:9 promotion score on c001n07: -1
 pcmk__clone_allocate: DoFencing allocation score on c001n02: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n03: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n04: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n05: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n06: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n02: 1
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n07: 1
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n06: 1
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n07: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n02: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n03: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n04: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n05: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n06: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n02: 1001
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n06: 1
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n07: 1
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n02: 1
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n04: -INFINITY
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n06: 1
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n07: 1
 pcmk__group_allocate: group-1 allocation score on c001n02: 0
 pcmk__group_allocate: group-1 allocation score on c001n03: 0
 pcmk__group_allocate: group-1 allocation score on c001n04: 0
 pcmk__group_allocate: group-1 allocation score on c001n05: 0
 pcmk__group_allocate: group-1 allocation score on c001n06: 0
 pcmk__group_allocate: group-1 allocation score on c001n07: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n02: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n03: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n04: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n05: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n06: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n07: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n02: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n03: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n04: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n05: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n06: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n07: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n02: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n03: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n04: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n05: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n06: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n07: 0
 pcmk__native_allocate: DcIPaddr allocation score on c001n02: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n03: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n04: 0
 pcmk__native_allocate: DcIPaddr allocation score on c001n05: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n06: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n02: 1
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n06: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n07: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n06: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n07: 1
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n06: 1
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n02: 0
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: lsb_dummy allocation score on c001n02: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n03: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n04: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n05: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n06: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n07: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n02: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n02: 0
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n02: 1001
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n07: -INFINITY
-pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n02: 0
+pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n06: 1
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n07: 0
-pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n02: 0
+pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n06: 0
+pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n07: 1
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n02: 1
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n06: 1
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n06: -INFINITY
+pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n07: 1
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n02: 100
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n03: 100
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n04: 100
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n05: 100
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n06: 100
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n07: 100
diff --git a/cts/scheduler/scores/stonith-0.scores b/cts/scheduler/scores/stonith-0.scores
index 61eaca6d5e..47097e71d3 100644
--- a/cts/scheduler/scores/stonith-0.scores
+++ b/cts/scheduler/scores/stonith-0.scores
@@ -1,435 +1,435 @@
 
 ocf_msdummy:0 promotion score on c001n02: 1000
 ocf_msdummy:1 promotion score on c001n02: -1
 ocf_msdummy:10 promotion score on c001n04: -1
 ocf_msdummy:11 promotion score on c001n04: -1
 ocf_msdummy:12 promotion score on c001n06: -1
 ocf_msdummy:13 promotion score on c001n06: -1
 ocf_msdummy:2 promotion score on c001n07: -1
 ocf_msdummy:3 promotion score on c001n07: -1
 ocf_msdummy:4 promotion score on c001n08: -1
 ocf_msdummy:5 promotion score on c001n08: -1
 ocf_msdummy:6 promotion score on none: 0
 ocf_msdummy:7 promotion score on none: 0
 ocf_msdummy:8 promotion score on none: 0
 ocf_msdummy:9 promotion score on none: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n02: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n03: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n04: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n05: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n06: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n07: 0
 pcmk__clone_allocate: DoFencing allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n02: 1
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n04: 1
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n06: 1
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n07: 1
 pcmk__clone_allocate: child_DoFencing:3 allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on c001n08: 1
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on c001n08: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n02: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n03: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n04: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n05: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n06: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n07: 0
 pcmk__clone_allocate: child_DoFencing:6 allocation score on c001n08: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n02: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n03: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n04: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n05: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n06: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n07: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n02: 1001
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n02: 1
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n04: 1
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n04: 1
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n06: 1
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:12 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n06: 1
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:13 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n07: 1
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n07: 1
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on c001n08: 1
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on c001n08: 1
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on c001n08: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n02: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n03: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n04: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n05: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n06: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n07: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on c001n08: 0
 pcmk__group_allocate: group-1 allocation score on c001n02: 0
 pcmk__group_allocate: group-1 allocation score on c001n03: 0
 pcmk__group_allocate: group-1 allocation score on c001n04: 0
 pcmk__group_allocate: group-1 allocation score on c001n05: 0
 pcmk__group_allocate: group-1 allocation score on c001n06: 0
 pcmk__group_allocate: group-1 allocation score on c001n07: 0
 pcmk__group_allocate: group-1 allocation score on c001n08: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n02: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n03: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n04: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n05: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n06: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n07: 0
 pcmk__group_allocate: heartbeat_192.168.100.182 allocation score on c001n08: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n02: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n03: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n04: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n05: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n06: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n07: 0
 pcmk__group_allocate: ocf_192.168.100.181 allocation score on c001n08: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n02: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n03: -INFINITY
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n04: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n05: -INFINITY
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n06: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n07: 0
 pcmk__group_allocate: ocf_192.168.100.183 allocation score on c001n08: 0
 pcmk__native_allocate: DcIPaddr allocation score on c001n02: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n03: 0
 pcmk__native_allocate: DcIPaddr allocation score on c001n04: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n05: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n06: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n07: -INFINITY
 pcmk__native_allocate: DcIPaddr allocation score on c001n08: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n02: 1
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n04: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n06: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n07: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on c001n08: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n04: 1
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n06: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n07: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on c001n08: 0
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n06: 1
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n07: 0
 pcmk__native_allocate: child_DoFencing:2 allocation score on c001n08: 0
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n07: 1
 pcmk__native_allocate: child_DoFencing:3 allocation score on c001n08: 0
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on c001n08: 1
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: child_DoFencing:6 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n02: 0
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: heartbeat_192.168.100.182 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: lsb_dummy allocation score on c001n02: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n03: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n04: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n05: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n06: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n07: 0
 pcmk__native_allocate: lsb_dummy allocation score on c001n08: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n02: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_192.168.100.181 allocation score on c001n08: 0
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n02: 0
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_192.168.100.183 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n02: 1001
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on c001n08: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n02: 1
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n07: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on c001n08: 0
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n04: 1
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n06: 0
-pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n07: 0
-pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n08: 0
+pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n07: -INFINITY
+pcmk__native_allocate: ocf_msdummy:10 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n04: 1
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n06: 0
-pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n07: 0
-pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n08: 0
+pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n07: -INFINITY
+pcmk__native_allocate: ocf_msdummy:11 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n06: 1
-pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n07: 0
-pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n08: 0
+pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n07: -INFINITY
+pcmk__native_allocate: ocf_msdummy:12 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n06: 1
-pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n07: 0
-pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n08: 0
+pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n07: -INFINITY
+pcmk__native_allocate: ocf_msdummy:13 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n03: -INFINITY
-pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n04: -INFINITY
+pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n06: -INFINITY
+pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n07: 1
 pcmk__native_allocate: ocf_msdummy:2 allocation score on c001n08: 0
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n03: -INFINITY
-pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n04: -INFINITY
+pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n06: -INFINITY
+pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n07: 1
 pcmk__native_allocate: ocf_msdummy:3 allocation score on c001n08: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n03: -INFINITY
-pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n04: -INFINITY
+pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n06: -INFINITY
+pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on c001n08: 1
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n03: -INFINITY
-pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n04: -INFINITY
+pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n04: 0
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n05: -INFINITY
-pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n06: -INFINITY
+pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n06: 0
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on c001n08: 1
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n02: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n03: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n04: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n06: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n07: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on c001n08: -INFINITY
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n02: 100
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n02 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n03: 100
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n03 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n04: 100
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n04 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n05: -INFINITY
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n05 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n06: 100
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n06 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n07: 100
 pcmk__native_allocate: rsc_c001n07 allocation score on c001n08: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n02: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n03: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n04: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n05: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n06: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n07: 0
 pcmk__native_allocate: rsc_c001n08 allocation score on c001n08: 100
diff --git a/cts/scheduler/scores/stonith-2.scores b/cts/scheduler/scores/stonith-2.scores
index c39c006c82..aae83ddd98 100644
--- a/cts/scheduler/scores/stonith-2.scores
+++ b/cts/scheduler/scores/stonith-2.scores
@@ -1,331 +1,331 @@
 
 ocf_msdummy:0 promotion score on sles-3: -1
 ocf_msdummy:1 promotion score on sles-4: -1
 ocf_msdummy:10 promotion score on sles-2: -1
 ocf_msdummy:11 promotion score on sles-3: -1
 ocf_msdummy:2 promotion score on sles-4: -1
 ocf_msdummy:3 promotion score on sles-1: -1
 ocf_msdummy:4 promotion score on sles-2: -1
 ocf_msdummy:5 promotion score on sles-1: -1
 ocf_msdummy:6 promotion score on none: 0
 ocf_msdummy:7 promotion score on none: 0
 ocf_msdummy:8 promotion score on sles-6: -1
 ocf_msdummy:9 promotion score on sles-6: -1
 pcmk__clone_allocate: DoFencing allocation score on sles-1: 0
 pcmk__clone_allocate: DoFencing allocation score on sles-2: 0
 pcmk__clone_allocate: DoFencing allocation score on sles-3: 0
 pcmk__clone_allocate: DoFencing allocation score on sles-4: 0
 pcmk__clone_allocate: DoFencing allocation score on sles-5: 0
 pcmk__clone_allocate: DoFencing allocation score on sles-6: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-1: 1
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-2: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-3: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-4: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:0 allocation score on sles-6: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-1: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-2: 1
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-3: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-4: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:1 allocation score on sles-6: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-1: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-2: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-3: 1
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-4: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:2 allocation score on sles-6: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-1: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-2: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-3: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-4: 1
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:3 allocation score on sles-6: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-1: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-2: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-3: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-4: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:4 allocation score on sles-6: 1
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-1: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-2: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-3: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-4: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-5: 0
 pcmk__clone_allocate: child_DoFencing:5 allocation score on sles-6: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-1: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-2: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-3: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-4: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-5: 0
 pcmk__clone_allocate: master_rsc_1 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-3: 1
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:0 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-4: 1
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:1 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-2: 1
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:10 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-3: 1
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:11 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-4: 1
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:2 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-1: 1
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:3 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-2: 1
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:4 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-1: 1
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:5 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:6 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:7 allocation score on sles-6: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:8 allocation score on sles-6: 1
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-1: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-2: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-3: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-4: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-5: 0
 pcmk__clone_allocate: ocf_msdummy:9 allocation score on sles-6: 1
 pcmk__group_allocate: group-1 allocation score on sles-1: 0
 pcmk__group_allocate: group-1 allocation score on sles-2: 0
 pcmk__group_allocate: group-1 allocation score on sles-3: 0
 pcmk__group_allocate: group-1 allocation score on sles-4: 0
 pcmk__group_allocate: group-1 allocation score on sles-5: 0
 pcmk__group_allocate: group-1 allocation score on sles-6: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-1: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-2: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-3: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-4: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-5: 0
 pcmk__group_allocate: r192.168.100.181 allocation score on sles-6: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-1: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-2: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-3: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-4: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-5: 0
 pcmk__group_allocate: r192.168.100.182 allocation score on sles-6: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-1: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-2: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-3: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-4: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-5: 0
 pcmk__group_allocate: r192.168.100.183 allocation score on sles-6: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-1: 1
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-2: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-3: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-4: 0
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:0 allocation score on sles-6: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-2: 1
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-3: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-4: 0
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:1 allocation score on sles-6: 0
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-3: 1
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-4: 0
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:2 allocation score on sles-6: 0
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-4: 1
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:3 allocation score on sles-6: 0
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:4 allocation score on sles-6: 1
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: child_DoFencing:5 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: lsb_dummy allocation score on sles-1: 0
 pcmk__native_allocate: lsb_dummy allocation score on sles-2: 0
 pcmk__native_allocate: lsb_dummy allocation score on sles-3: 0
 pcmk__native_allocate: lsb_dummy allocation score on sles-4: 0
 pcmk__native_allocate: lsb_dummy allocation score on sles-5: 0
 pcmk__native_allocate: lsb_dummy allocation score on sles-6: 0
 pcmk__native_allocate: migrator allocation score on sles-1: 0
 pcmk__native_allocate: migrator allocation score on sles-2: 0
 pcmk__native_allocate: migrator allocation score on sles-3: 0
 pcmk__native_allocate: migrator allocation score on sles-4: 0
 pcmk__native_allocate: migrator allocation score on sles-5: 0
 pcmk__native_allocate: migrator allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-1: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-2: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-3: 1
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-4: 0
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:0 allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-1: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-2: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-4: 1
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:1 allocation score on sles-6: 0
-pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-1: 0
+pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-2: 1
 pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-3: 0
-pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-4: 0
+pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-5: -INFINITY
-pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-6: 0
-pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-1: 0
-pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-2: 0
+pcmk__native_allocate: ocf_msdummy:10 allocation score on sles-6: -INFINITY
+pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-1: -INFINITY
+pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-3: 1
-pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-4: 0
+pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-5: -INFINITY
-pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-6: 0
+pcmk__native_allocate: ocf_msdummy:11 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-1: 0
 pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-2: 0
-pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-4: 1
 pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:2 allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-1: 1
 pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-2: 0
-pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:3 allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-1: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-2: 1
-pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:4 allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-1: 1
-pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-2: -INFINITY
-pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-2: 0
+pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:5 allocation score on sles-6: 0
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:6 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-1: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:7 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-1: -INFINITY
-pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-2: -INFINITY
-pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-2: 0
+pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:8 allocation score on sles-6: 1
 pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-1: -INFINITY
-pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-2: -INFINITY
-pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-3: -INFINITY
+pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-2: 0
+pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-3: 0
 pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: ocf_msdummy:9 allocation score on sles-6: 1
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-1: 0
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-2: 0
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-3: 0
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-4: 0
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: r192.168.100.181 allocation score on sles-6: 0
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-1: 0
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: r192.168.100.182 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-1: 0
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-2: -INFINITY
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-3: -INFINITY
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-4: -INFINITY
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-5: -INFINITY
 pcmk__native_allocate: r192.168.100.183 allocation score on sles-6: -INFINITY
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-1: 100
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-2: 0
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-3: 0
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-4: 0
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-5: 0
 pcmk__native_allocate: rsc_sles-1 allocation score on sles-6: 0
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-1: 0
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-2: 100
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-3: 0
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-4: 0
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-5: 0
 pcmk__native_allocate: rsc_sles-2 allocation score on sles-6: 0
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-1: 0
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-2: 0
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-3: 100
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-4: 0
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-5: 0
 pcmk__native_allocate: rsc_sles-3 allocation score on sles-6: 0
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-1: 0
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-2: 0
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-3: 0
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-4: 100
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-5: 0
 pcmk__native_allocate: rsc_sles-4 allocation score on sles-6: 0
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-1: 0
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-2: 0
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-3: 0
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-4: 0
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-5: 100
 pcmk__native_allocate: rsc_sles-5 allocation score on sles-6: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-1: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-2: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-3: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-4: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-5: 0
 pcmk__native_allocate: rsc_sles-6 allocation score on sles-6: 100
diff --git a/include/pcmki/pcmki_resource.h b/include/pcmki/pcmki_resource.h
index 09737ebf87..a89d1748b9 100644
--- a/include/pcmki/pcmki_resource.h
+++ b/include/pcmki/pcmki_resource.h
@@ -1,21 +1,20 @@
 /*
- * Copyright 2021 the Pacemaker project contributors
+ * Copyright 2021-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 #ifndef PCMK__PCMKI_PCMKI_RESOURCE__H
 #define PCMK__PCMKI_PCMKI_RESOURCE__H
 
 #include <glib.h>
 
 #include <crm/common/output_internal.h>
 #include <crm/pengine/pe_types.h>
 
 int pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
-                           pe_node_t *node, GHashTable *overrides,
-                           pe_working_set_t *data_set);
+                           pe_node_t *node, GHashTable *overrides);
 
 #endif /* PCMK__PCMKI_PCMKI_RESOURCE__H */
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index bd7e28346e..f959fbc58b 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,426 +1,429 @@
 /*
  * Copyright 2021-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
                                          bool optional, bool runnable);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
                                      guint interval_ms, pe_node_t *node);
 
 G_GNUC_INTERNAL
-pe_action_t *pcmk__new_shutdown_action(pe_node_t *node,
-                                       pe_working_set_t *data_set);
+pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                                xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
                           pe_action_wrapper_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pe_working_set_t *data_set);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, enum pe_ordering order,
                             pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
-void pcmk__fence_guest(pe_node_t *node, pe_working_set_t *data_set);
+void pcmk__fence_guest(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(pe_node_t *node);
 
-G_GNUC_INTERNAL
-bool pcmk__is_unfence_device(const pe_resource_t *rsc,
-                             const pe_working_set_t *data_set);
-
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
                                   pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id,
                                  pe_resource_t **rsc, pe_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pe_working_set_t *data_set);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
                                    int node_weight, const char *discover_mode,
                                    pe_node_t *foo_node,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc);
 
 
 // Colocation constraints
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent,
                                                   pe_resource_t *primary,
                                                   pcmk__colocation_t *constraint,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
                                   pe_resource_t *primary,
                                   pcmk__colocation_t *constraint);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                                    pe_resource_t *primary,
                                    pcmk__colocation_t *constraint);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pe_resource_t *dependent, pe_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           bool influence, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocated_starts(pe_action_t *action,
                                   pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Check whether colocation's left-hand preferences should be considered
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Right-hand instance (normally colocation->primary,
  *                        which is also how NULL is treated; for clones or
  *                        bundles with multiple instances, this can be a
  *                        particular instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pe_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* The left hand of a colocation influences the right hand's location
      * if the influence option is true, or the right hand is not yet active.
      */
     return colocation->influence || (rsc->running_on == NULL);
 }
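 
 /* A minimal usage sketch (hypothetical caller; "colocation" and "instance"
  * are assumed to be in scope): skip colocations that should not influence a
  * given instance's placement:
  *
  *     if (!pcmk__colocation_has_influence(colocation, instance)) {
  *         continue; // instance is already active and influence is disabled
  *     }
  */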
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task,
                         pe_action_t *lh_action, pe_resource_t *rh_rsc,
                         char *rh_task, pe_action_t *rh_action,
                         enum pe_ordering type, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pe_node_t *node,
                                        pe_action_t *shutdown_op,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pe_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in] lh_rsc    Resource for 'first' action
  * \param[in] rh_rsc    Resource for 'then' action
  * \param[in] lh_task   Action key for 'first' action
  * \param[in] rh_task   Action key for 'then' action
  * \param[in] flags     Bitmask of enum pe_ordering flags
  * \param[in] data_set  Cluster working set to add ordering to
  */
 #define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task,      \
                                      flags, data_set)                       \
     pcmk__new_ordering((lh_rsc), pcmk__op_key((lh_rsc)->id, (lh_task), 0),  \
                        NULL,                                                \
                        (rh_rsc), pcmk__op_key((rh_rsc)->id, (rh_task), 0),  \
                        NULL, (flags), (data_set))
 
 #define pcmk__order_starts(rsc1, rsc2, type, data_set)       \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_START,  \
                                  (rsc2), CRMD_ACTION_START, (type), (data_set))
 
 #define pcmk__order_stops(rsc1, rsc2, type, data_set)        \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP,   \
                                  (rsc2), CRMD_ACTION_STOP, (type), (data_set))
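 
 /* A minimal usage sketch, assuming rsc1 and rsc2 are resources in the
  * working set: ordering rsc1's start before rsc2's start could look like
  *
  *     pcmk__order_starts(rsc1, rsc2, pe_order_optional, data_set);
  *
  * which expands to a pcmk__new_ordering() call with operation keys built by
  * pcmk__op_key().
  */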
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__connection_host_for_action(pe_action_t *action);
 
 G_GNUC_INTERNAL
-void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
-                                  pe_working_set_t *data_set);
+void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action);
 
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(pe_resource_t *rsc,
                                        pe_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 
 // Bundles (pcmk_sched_bundle.c)
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pe_resource_t *rsc);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node,
                         pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pe_working_set_t *data_set);
 
+G_GNUC_INTERNAL
+pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
+                                  const pe_node_t *node);
+
 
 // Clone notifications (pcmk_sched_notif.c)
 
 G_GNUC_INTERNAL
 void pcmk__create_notifications(pe_resource_t *rsc, notify_data_t *n_data);
 
 G_GNUC_INTERNAL
 notify_data_t *pcmk__clone_notif_pseudo_ops(pe_resource_t *rsc,
                                             const char *task,
                                             pe_action_t *action,
                                             pe_action_t *complete);
 
 G_GNUC_INTERNAL
 void pcmk__free_notification_data(notify_data_t *n_data);
 
 G_GNUC_INTERNAL
 void pcmk__order_notifs_after_fencing(pe_action_t *action, pe_resource_t *rsc,
                                       pe_action_t *stonith_op);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_allocation_methods(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
                              pe_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pe_working_set_t *data_set);
 
+G_GNUC_INTERNAL
+gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
+
+G_GNUC_INTERNAL
+gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
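+
+/* Both comparators follow GLib's GCompareFunc convention, so (as one
+ * possible use) a list of clone instances could be sorted with:
+ *
+ *     instances = g_list_sort(instances, pcmk__cmp_instance);
+ */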
+
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pe_working_set_t *data_set);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pe_node_t *node1,
                                   const pe_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
-void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer,
-                                     pe_working_set_t *data_set);
+void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pe_resource_t *rsc,
                                           GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index c63e15bc0e..9f4d85a4f0 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -1,1089 +1,1089 @@
 /*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Convenience macros for logging action properties
 
 #define action_type_str(flags) \
     (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
 
 #define action_optional_str(flags) \
     (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
 
 #define action_runnable_str(flags) \
     (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
 
 #define action_node_str(a) \
     (((a)->node == NULL)? "no node" : (a)->node->details->uname)
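 
 /* A minimal usage sketch, assuming "action" is a pe_action_t * in scope:
  *
  *     crm_trace("Processing %s %s (%s) on %s",
  *               action_optional_str(action->flags),
  *               action_type_str(action->flags),
  *               action_runnable_str(action->flags),
  *               action_node_str(action));
  */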
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified ID
  *
  * \param[in]     id      Node UUID to add
  * \param[in,out] xml     Parent XML tag to add to
  *
  * \return Newly created XML tag for the given node ID
  */
 static xmlNode*
 add_node_to_xml_by_id(const char *id, xmlNode *xml)
 {
     xmlNode *node_xml;
 
     node_xml = create_xml_node(xml, XML_CIB_TAG_NODE);
     crm_xml_add(node_xml, XML_ATTR_UUID, id);
 
     return node_xml;
 }
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified node
  *
  * \param[in]     node  Node to add
  * \param[in,out] xml   XML to add node to
  */
 static void
 add_node_to_xml(const pe_node_t *node, void *xml)
 {
     add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that need an update of their maintenance state
  *
  * \param[in,out] xml       Parent XML tag to add to (if NULL, matching
  *                          nodes are only counted, not added)
  * \param[in]     data_set  Working set for cluster
  *
  * \return Number of nodes needing a maintenance state update
  */
 static int
 add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     xmlNode *maintenance =
         xml? create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE) : NULL;
     int count = 0;
 
     for (gIter = data_set->nodes; gIter != NULL;
          gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         struct pe_node_shared_s *details = node->details;
 
         if (!pe__is_guest_or_remote_node(node)) {
             continue; /* only guest and remote nodes need to know this */
         }
 
         if (details->maintenance != details->remote_maintenance) {
             if (maintenance) {
                 crm_xml_add(
                     add_node_to_xml_by_id(node->details->id, maintenance),
                     XML_NODE_IS_MAINTENANCE, details->maintenance? "1" : "0");
             }
             count++;
         }
     }
     crm_trace("%s %d nodes to adjust maintenance-mode "
               "to transition", maintenance?"Added":"Counted", count);
     return count;
 }
 
 /*!
  * \internal
  * \brief Add pseudo action with nodes needing maintenance state update
  *
  * \param[in,out] data_set  Working set for cluster
  */
 static void
 add_maintenance_update(pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
 
     if (add_maintenance_nodes(NULL, data_set)) {
         crm_trace("adding maintenance state update pseudo action");
         action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
         pe__set_action_flags(action, pe_action_print_always);
     }
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that an action is expected to bring down
  *
  * If a specified action is expected to bring any nodes down, add an XML block
  * with their UUIDs. When a node is lost, this allows the controller to
  * determine whether it was expected.
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     action    Action to check for downed nodes
  * \param[in]     data_set  Working set for cluster
  */
 static void
 add_downed_nodes(xmlNode *xml, const pe_action_t *action,
                  const pe_working_set_t *data_set)
 {
     CRM_CHECK(xml && action && action->node && data_set, return);
 
     if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
 
         /* Shutdown brings the action's node down */
         xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
         add_node_to_xml_by_id(action->node->details->id, downed);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
 
         /* Fencing brings down the action's node and any hosted guest nodes */
         const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
 
         if (pcmk__is_fencing_action(fence)) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->node->details->id, downed);
             pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
         }
 
     } else if (action->rsc && action->rsc->is_remote_node
                && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
 
         /* Stopping a remote connection resource makes the connected node
          * down, unless the stop is part of a migration
          */
         GList *iter;
         pe_action_t *input;
         gboolean migrating = FALSE;
 
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             input = ((pe_action_wrapper_t *) iter->data)->action;
             if (input->rsc && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_casei)
                 && pcmk__str_eq(input->task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
                 migrating = TRUE;
                 break;
             }
         }
         if (!migrating) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->rsc->id, downed);
         }
     }
 }
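 
 /* Editor's note (illustrative, not taken from the patch): given the helpers
  * above, the generated block is assumed to look like
  *
  *     <downed>
  *       <node id="NODE-UUID"/>
  *     </downed>
  *
  * so that when the controller later sees a node disappear, it can match the
  * UUID against this list and treat the loss as expected.
  */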
 
 /*!
  * \internal
  * \brief Create a transition graph operation key for a clone action
  *
  * \param[in] action       Clone action
  * \param[in] interval_ms  Action interval in milliseconds
  *
  * \return Newly allocated string with transition graph operation key
  */
 static char *
 clone_op_key(pe_action_t *action, guint interval_ms)
 {
     if (pcmk__str_eq(action->task, RSC_NOTIFY, pcmk__str_none)) {
         const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
         const char *n_task = g_hash_table_lookup(action->meta,
                                                  "notify_operation");
 
         CRM_LOG_ASSERT((n_type != NULL) && (n_task != NULL));
         return pcmk__notify_key(action->rsc->clone_name, n_type, n_task);
 
     } else if (action->cancel_task != NULL) {
         return pcmk__op_key(action->rsc->clone_name, action->cancel_task,
                             interval_ms);
     } else {
         return pcmk__op_key(action->rsc->clone_name, action->task, interval_ms);
     }
 }
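 
 /* Editor's sketch: assuming the usual key-helper formats, a 10-second
  * recurring monitor on an instance whose clone_name is "rsc1:0" would get
  * the op key "rsc1:0_monitor_10000" from pcmk__op_key(), while a
  * "confirmed-post" start notification would get
  * "rsc1:0_confirmed-post_notify_start_0" from pcmk__notify_key(). The exact
  * formats are an assumption based on those helpers' naming conventions.
  */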
 
 /*!
  * \internal
  * \brief Add node details to transition graph action XML
  *
  * \param[in] action  Scheduled action
  * \param[in] xml     Transition graph action XML for \p action
  */
 static void
 add_node_details(pe_action_t *action, xmlNode *xml)
 {
     pe_node_t *router_node = pcmk__connection_host_for_action(action);
 
     crm_xml_add(xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
     crm_xml_add(xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
     if (router_node != NULL) {
         crm_xml_add(xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
     }
 }
 
 /*!
  * \internal
  * \brief Add resource details to transition graph action XML
  *
  * \param[in] action      Scheduled action
  * \param[in] action_xml  Transition graph action XML for \p action
  */
 static void
 add_resource_details(pe_action_t *action, xmlNode *action_xml)
 {
     xmlNode *rsc_xml = NULL;
     const char *attr_list[] = {
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER,
         XML_ATTR_TYPE
     };
 
     /* If a resource is locked to a node via shutdown-lock, mark its actions
      * so the controller can preserve the lock when the action completes.
      */
     if (pcmk__action_locks_rsc_to_node(action)) {
         crm_xml_add_ll(action_xml, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
                        (long long) action->rsc->lock_time);
     }
 
     // List affected resource
 
     rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));
     if (pcmk_is_set(action->rsc->flags, pe_rsc_orphan)
         && (action->rsc->clone_name != NULL)) {
         /* Use the numbered instance name here, because if there is more
          * than one instance on a node, we need to make sure the command
          * goes to the right one.
          *
          * This is important even for anonymous clones, because the clone's
          * unique meta-attribute might have just been toggled from on to
          * off.
          */
         crm_debug("Using orphan clone name %s instead of %s",
                   action->rsc->id, action->rsc->clone_name);
         crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
         crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
 
     } else if (!pcmk_is_set(action->rsc->flags, pe_rsc_unique)) {
         const char *xml_id = ID(action->rsc->xml);
 
         crm_debug("Using anonymous clone name %s for %s (aka %s)",
                   xml_id, action->rsc->id, action->rsc->clone_name);
 
         /* ID is what we'd like the client to use
          * ID_LONG is what they might know it as instead
          *
          * ID_LONG is only strictly needed /here/ during the
          * transition period until all nodes in the cluster
          * are running the new software /and/ have rebooted
          * once (meaning that they've only ever spoken to a DC
          * supporting this feature).
          *
          * If anyone toggles the unique flag to 'on', the
          * 'instance free' name will correspond to an orphan
          * and fall into the clause above instead
          */
         crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
         if ((action->rsc->clone_name != NULL)
             && !pcmk__str_eq(xml_id, action->rsc->clone_name,
                              pcmk__str_none)) {
             crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
         } else {
             crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
         }
 
     } else {
         CRM_ASSERT(action->rsc->clone_name == NULL);
         crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
     }
 
     for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
         crm_xml_add(rsc_xml, attr_list[lpc],
                     g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
     }
 }
 
 /*!
  * \internal
  * \brief Add action attributes to transition graph action XML
  *
  * \param[in] action  Scheduled action
  * \param[in] action_xml  Transition graph action XML for \p action
  */
 static void
 add_action_attributes(pe_action_t *action, xmlNode *action_xml)
 {
     xmlNode *args_xml = NULL;
 
     /* We create free-standing XML to start, so we can sort the attributes
      * before adding it to action_xml, which keeps the scheduler regression
      * test graphs comparable.
      */
     args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
 
     crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
     g_hash_table_foreach(action->extra, hash2field, args_xml);
 
     if ((action->rsc != NULL) && (action->node != NULL)) {
         // Get the resource instance attributes, evaluated properly for node
         GHashTable *params = pe_rsc_params(action->rsc, action->node,
                                            action->rsc->cluster);
 
-        pcmk__substitute_remote_addr(action->rsc, params, action->rsc->cluster);
+        pcmk__substitute_remote_addr(action->rsc, params);
 
         g_hash_table_foreach(params, hash2smartfield, args_xml);
 
 #if ENABLE_VERSIONED_ATTRS
         {
             xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
 
             pe_get_versioned_attributes(versioned_parameters, action->rsc,
                                         action->node, action->rsc->cluster);
             if (xml_has_children(versioned_parameters)) {
                 add_node_copy(action_xml, versioned_parameters);
             }
             free_xml(versioned_parameters);
         }
 #endif
 
     } else if ((action->rsc != NULL) && (action->rsc->variant <= pe_native)) {
         GHashTable *params = pe_rsc_params(action->rsc, NULL,
                                            action->rsc->cluster);
 
         g_hash_table_foreach(params, hash2smartfield, args_xml);
 
 #if ENABLE_VERSIONED_ATTRS
         if (xml_has_children(action->rsc->versioned_parameters)) {
             add_node_copy(action_xml, action->rsc->versioned_parameters);
         }
 #endif
     }
 
 #if ENABLE_VERSIONED_ATTRS
     if (rsc_details != NULL) {
         if (xml_has_children(rsc_details->versioned_parameters)) {
             add_node_copy(action_xml, rsc_details->versioned_parameters);
         }
         if (xml_has_children(rsc_details->versioned_meta)) {
             add_node_copy(action_xml, rsc_details->versioned_meta);
         }
     }
 #endif
 
     g_hash_table_foreach(action->meta, hash2metafield, args_xml);
     if (action->rsc != NULL) {
         const char *value = g_hash_table_lookup(action->rsc->meta,
                                                 "external-ip");
         pe_resource_t *parent = action->rsc;
 
         while (parent != NULL) {
             parent->cmds->append_meta(parent, args_xml);
             parent = parent->parent;
         }
 
         if (value != NULL) {
             hash2smartfield((gpointer) "pcmk_external_ip", (gpointer) value,
                             (gpointer) args_xml);
         }
 
         pcmk__add_bundle_meta_to_xml(args_xml, action);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_none)
                && (action->node != NULL)) {
         /* Pass the node's attributes as meta-attributes.
          *
          * @TODO: Determine whether it is still necessary to do this. It was
          * added in 33d99707, probably for the libfence-based implementation in
          * c9a90bd, which is no longer used.
          */
         g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
     }
 
     sorted_xml(args_xml, action_xml, FALSE);
     free_xml(args_xml);
 }
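 
 /* Editor's sketch: sorting before attaching (sorted_xml() above) is what
  * keeps regression-test graphs byte-for-byte comparable, since GHashTable
  * iteration order is unspecified. The same idea in standalone GLib code
  * (hypothetical snippet, not Pacemaker API):
  *
  *     #include <glib.h>
  *     #include <string.h>
  *
  *     static void emit_sorted(GHashTable *attrs) {
  *         GList *keys = g_hash_table_get_keys(attrs);
  *
  *         keys = g_list_sort(keys, (GCompareFunc) strcmp);
  *         for (GList *k = keys; k != NULL; k = k->next) {
  *             g_print("%s=%s\n", (char *) k->data,
  *                     (char *) g_hash_table_lookup(attrs, k->data));
  *         }
  *         g_list_free(keys);
  *     }
  */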
 
 /*!
  * \internal
  * \brief Create the transition graph XML for a scheduled action
  *
  * \param[in] parent        Parent XML element to add action to
  * \param[in] action        Scheduled action
  * \param[in] skip_details  If false, add action details as sub-elements
  * \param[in] data_set      Cluster working set
  */
 static void
 create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
                     pe_working_set_t *data_set)
 {
     bool needs_node_info = true;
     bool needs_maintenance_info = false;
     xmlNode *action_xml = NULL;
 #if ENABLE_VERSIONED_ATTRS
     pe_rsc_action_details_t *rsc_details = NULL;
 #endif
 
     if ((action == NULL) || (data_set == NULL)) {
         return;
     }
 
     // Create the top-level element based on task
 
     if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
         /* All fences need node info; guest node fences are pseudo-events */
         action_xml = create_xml_node(parent,
                                      pcmk_is_set(action->flags, pe_action_pseudo)?
                                      XML_GRAPH_TAG_PSEUDO_EVENT :
                                      XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_any_of(action->task,
                                 CRM_OP_SHUTDOWN,
                                 CRM_OP_CLEAR_FAILCOUNT,
                                 CRM_OP_LRM_REFRESH, NULL)) {
         action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_none)) {
         // CIB-only clean-up for shutdown locks
         action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
         crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
 
     } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
         if (pcmk__str_eq(action->task, CRM_OP_MAINTENANCE_NODES,
                          pcmk__str_none)) {
             needs_maintenance_info = true;
         }
         action_xml = create_xml_node(parent, XML_GRAPH_TAG_PSEUDO_EVENT);
         needs_node_info = false;
 
     } else {
         action_xml = create_xml_node(parent, XML_GRAPH_TAG_RSC_OP);
 #if ENABLE_VERSIONED_ATTRS
         rsc_details = pe_rsc_action_details(action);
 #endif
     }
 
     crm_xml_add_int(action_xml, XML_ATTR_ID, action->id);
     crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);
 
     if ((action->rsc != NULL) && (action->rsc->clone_name != NULL)) {
         char *clone_key = NULL;
         guint interval_ms;
 
         if (pcmk__guint_from_hash(action->meta, XML_LRM_ATTR_INTERVAL_MS, 0,
                                   &interval_ms) != pcmk_rc_ok) {
             interval_ms = 0;
         }
         clone_key = clone_op_key(action, interval_ms);
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
         crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
         free(clone_key);
     } else {
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
     }
 
     if (needs_node_info && (action->node != NULL)) {
         add_node_details(action, action_xml);
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET),
                             strdup(action->node->details->uname));
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID),
                             strdup(action->node->details->id));
     }
 
     if (skip_details) {
         return;
     }
 
     if ((action->rsc != NULL)
         && !pcmk_is_set(action->flags, pe_action_pseudo)) {
 
         // This is a real resource action, so add resource details
         add_resource_details(action, action_xml);
     }
 
     /* List any attributes in effect */
     add_action_attributes(action, action_xml);
 
     /* List any nodes this action is expected to make down */
     if (needs_node_info && (action->node != NULL)) {
         add_downed_nodes(action_xml, action, data_set);
     }
 
     if (needs_maintenance_info) {
         add_maintenance_nodes(action_xml, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an action should be added to the transition graph
  *
  * \param[in] action  Action to check
  *
  * \return true if action should be added to graph, otherwise false
  */
 static bool
 should_add_action_to_graph(pe_action_t *action)
 {
     if (!pcmk_is_set(action->flags, pe_action_runnable)) {
         crm_trace("Ignoring action %s (%d): unrunnable",
                   action->uuid, action->id);
         return false;
     }
 
     if (pcmk_is_set(action->flags, pe_action_optional)
         && !pcmk_is_set(action->flags, pe_action_print_always)) {
         crm_trace("Ignoring action %s (%d): optional",
                   action->uuid, action->id);
         return false;
     }
 
     /* Actions for unmanaged resources should be excluded from the graph,
      * with the exception of monitors and cancellation of recurring monitors.
      */
     if ((action->rsc != NULL)
         && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
         && !pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_none)) {
         const char *interval_ms_s;
 
         /* A cancellation of a recurring monitor will get here because the task
          * is cancel rather than monitor, but the interval can still be used to
          * recognize it. The interval has been normalized to milliseconds by
          * this point, so a string comparison is sufficient.
          */
         interval_ms_s = g_hash_table_lookup(action->meta,
                                             XML_LRM_ATTR_INTERVAL_MS);
         if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
             crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
                       action->uuid, action->id, action->rsc->id);
             return false;
         }
     }
 
     /* Always add pseudo-actions, fence actions, and shutdown actions (already
      * determined to be required and runnable by this point)
      */
     if (pcmk_is_set(action->flags, pe_action_pseudo)
         || pcmk__strcase_any_of(action->task, CRM_OP_FENCE, CRM_OP_SHUTDOWN,
                                 NULL)) {
         return true;
     }
 
     if (action->node == NULL) {
         pe_err("Skipping action %s (%d) "
                "because it was not allocated to a node (bug?)",
                action->uuid, action->id);
         pcmk__log_action("Unallocated", action, false);
         return false;
     }
 
     if (pcmk_is_set(action->flags, pe_action_dc)) {
         crm_trace("Action %s (%d) should be dumped: "
                   "can run on DC instead of %s",
                   action->uuid, action->id, action->node->details->uname);
 
     } else if (pe__is_guest_node(action->node)
                && !action->node->details->remote_requires_reset) {
         crm_trace("Action %s (%d) should be dumped: "
                   "assuming will be runnable on guest node %s",
                   action->uuid, action->id, action->node->details->uname);
 
     } else if (!action->node->details->online) {
         pe_err("Skipping action %s (%d) "
                "because it was scheduled for offline node (bug?)",
                action->uuid, action->id);
         pcmk__log_action("Offline node", action, false);
         return false;
 
     } else if (action->node->details->unclean) {
         pe_err("Skipping action %s (%d) "
                "because it was scheduled for unclean node (bug?)",
                action->uuid, action->id);
         pcmk__log_action("Unclean node", action, false);
         return false;
     }
     return true;
 }
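 
 /* Editor's note: pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
  * above treats a missing interval the same as "0" (non-recurring). Assuming
  * that is what the flag means, a plain-C analogue would be:
  *
  *     #include <stdbool.h>
  *     #include <string.h>
  *
  *     static bool str_eq_null_matches(const char *a, const char *b) {
  *         if ((a == NULL) || (b == NULL)) {
  *             return true;    // NULL matches anything when the flag is set
  *         }
  *         return strcmp(a, b) == 0;
  *     }
  */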
 
 /*!
  * \internal
  * \brief Check whether an ordering's flags can change an action
  *
  * \param[in] ordering  Ordering to check
  *
  * \return true if ordering has flags that can change an action, false otherwise
  */
 static bool
 ordering_can_change_actions(pe_action_wrapper_t *ordering)
 {
     return pcmk_any_flags_set(ordering->type, ~(pe_order_implies_first_printed
                                                 |pe_order_implies_then_printed
                                                 |pe_order_optional));
 }
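 
 /* Editor's note: the check above asks "is any flag set outside this trio?",
  * implemented by masking with the bitwise complement. Standalone analogue:
  *
  *     #include <stdbool.h>
  *     #include <stdint.h>
  *
  *     #define FLAG_A (UINT32_C(1) << 0)
  *     #define FLAG_B (UINT32_C(1) << 1)
  *
  *     // True if flags contains any bit other than FLAG_A or FLAG_B
  *     static bool has_other_flags(uint32_t flags) {
  *         return (flags & ~(FLAG_A | FLAG_B)) != 0;
  *     }
  */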
 
 /*!
  * \internal
  * \brief Check whether an action input should be in the transition graph
  *
  * \param[in]     action  Action to check
  * \param[in,out] input   Action input to check
  *
  * \return true if input should be in graph, false otherwise
  * \note This function may not only check an input, but also disable it
  *       under certain circumstances (load or anti-colocation orderings
  *       that are not needed).
  */
 static bool
 should_add_input_to_graph(pe_action_t *action, pe_action_wrapper_t *input)
 {
     if (input->state == pe_link_dumped) {
         return true;
     }
 
     if (input->type == pe_order_none) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "ordering disabled",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
                && !ordering_can_change_actions(input)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional and input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
                && pcmk_is_set(input->type, pe_order_one_or_more)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "one-or-more and input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->type, pe_order_implies_first_migratable)
                && !pcmk_is_set(input->action->flags, pe_action_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "implies input migratable but input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->type, pe_order_apply_first_non_migratable)
                && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "only if input unmigratable but input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if ((input->type == pe_order_optional)
                && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)
                && pcmk__ends_with(input->action->uuid, "_stop_0")) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional but stop in migration",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (input->type == pe_order_load) {
         pe_node_t *input_node = input->action->node;
 
         // load orderings are relevant only if actions are for the same node
 
         if (action->rsc && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)) {
             pe_node_t *allocated = action->rsc->allocated_to;
 
             /* For load_stopped -> migrate_to orderings, we care about where
              * the resource has been allocated, not where it will be executed.
              */
             if ((input_node == NULL) || (allocated == NULL)
                 || (input_node->details != allocated->details)) {
                 crm_trace("Ignoring %s (%d) input %s (%d): "
                           "load ordering node mismatch %s vs %s",
                           action->uuid, action->id,
                           input->action->uuid, input->action->id,
                           (allocated? allocated->details->uname : "<none>"),
                           (input_node? input_node->details->uname : "<none>"));
                 input->type = pe_order_none;
                 return false;
             }
 
         } else if ((input_node == NULL) || (action->node == NULL)
                    || (input_node->details != action->node->details)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "load ordering node mismatch %s vs %s",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       (action->node? action->node->details->uname : "<none>"),
                       (input_node? input_node->details->uname : "<none>"));
             input->type = pe_order_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "load ordering input optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->type = pe_order_none;
             return false;
         }
 
     } else if (input->type == pe_order_anti_colocation) {
         if (input->action->node && action->node
             && (input->action->node->details != action->node->details)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "anti-colocation node mismatch %s vs %s",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       action->node->details->uname,
                       input->action->node->details->uname);
             input->type = pe_order_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "anti-colocation input optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->type = pe_order_none;
             return false;
         }
 
     } else if (input->action->rsc
                && input->action->rsc != action->rsc
                && pcmk_is_set(input->action->rsc->flags, pe_rsc_failed)
                && !pcmk_is_set(input->action->rsc->flags, pe_rsc_managed)
                && pcmk__ends_with(input->action->uuid, "_stop_0")
                && action->rsc && pe_rsc_is_clone(action->rsc)) {
         crm_warn("Ignoring requirement that %s complete before %s:"
                  " unmanaged failed resources cannot prevent clone shutdown",
                  input->action->uuid, action->uuid);
         return false;
 
     } else if (pcmk_is_set(input->action->flags, pe_action_optional)
                && !pcmk_any_flags_set(input->action->flags,
                                       pe_action_print_always|pe_action_dumped)
                && !should_add_action_to_graph(input->action)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "input optional",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
     }
 
     crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x",
               action->uuid, action->id, action_type_str(input->action->flags),
               input->action->uuid, input->action->id,
               action_node_str(input->action),
               action_runnable_str(input->action->flags),
               action_optional_str(input->action->flags), input->type);
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether an ordering creates an ordering loop
  *
  * \param[in] init_action  "First" action in ordering
  * \param[in] action       Callers should always set this to the same value
  *                         as \p init_action (this function may use a
  *                         different value for recursive calls)
  * \param[in] input        Action wrapper for "then" action in ordering
  *
  * \return true if the ordering creates a loop, otherwise false
  */
 bool
 pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
                      pe_action_wrapper_t *input)
 {
     bool has_loop = false;
 
     if (pcmk_is_set(input->action->flags, pe_action_tracking)) {
         crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->details->uname : "",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   input->type);
         return false;
     }
 
     // Don't need to check inputs that won't be used
     if (!should_add_input_to_graph(action, input)) {
         return false;
     }
 
     if (input->action == init_action) {
         crm_debug("Input loop found in %s@%s ->...-> %s@%s",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   init_action->uuid,
                   init_action->node? init_action->node->details->uname : "");
         return true;
     }
 
     pe__set_action_flags(input->action, pe_action_tracking);
 
     crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
               "for graph loop with %s@%s ",
               action->uuid,
               action->node? action->node->details->uname : "",
               input->action->uuid,
               input->action->node? input->action->node->details->uname : "",
               input->type,
               init_action->uuid,
               init_action->node? init_action->node->details->uname : "");
 
     // Recursively check input itself for loops
     for (GList *iter = input->action->actions_before;
          iter != NULL; iter = iter->next) {
 
         if (pcmk__graph_has_loop(init_action, input->action,
                                  (pe_action_wrapper_t *) iter->data)) {
             // Recursive call already logged a debug message
             has_loop = true;
             break;
         }
     }
 
     pe__clear_action_flags(input->action, pe_action_tracking);
 
     if (!has_loop) {
         crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->details->uname : "",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   input->type);
     }
     return has_loop;
 }
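 
 /* Editor's sketch: the pe_action_tracking flag implements the classic
  * "mark nodes currently on the DFS stack" approach to cycle detection. A
  * minimal standalone analogue with hypothetical types (not Pacemaker API):
  *
  *     #include <stdbool.h>
  *
  *     struct gnode {
  *         bool on_stack;          // analogue of pe_action_tracking
  *         int n_inputs;
  *         struct gnode **inputs;
  *     };
  *
  *     // Does following inputs from "from" lead back to "target"?
  *     static bool reaches(struct gnode *target, struct gnode *from) {
  *         if (from->on_stack) {
  *             return false;       // already being visited; don't recurse
  *         }
  *         if (from == target) {
  *             return true;        // closed a cycle back to the start
  *         }
  *         from->on_stack = true;
  *         for (int i = 0; i < from->n_inputs; i++) {
  *             if (reaches(target, from->inputs[i])) {
  *                 from->on_stack = false;
  *                 return true;
  *             }
  *         }
  *         from->on_stack = false;
  *         return false;
  *     }
  */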
 
 /*!
  * \internal
  * \brief Create a synapse XML element for a transition graph
  *
  * \param[in] action    Action that synapse is for
  * \param[in] data_set  Cluster working set containing graph
  *
  * \return Newly added XML element for new graph synapse
  */
 static xmlNode *
 create_graph_synapse(pe_action_t *action, pe_working_set_t *data_set)
 {
     int synapse_priority = 0;
     xmlNode *syn = create_xml_node(data_set->graph, "synapse");
 
     crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
     data_set->num_synapse++;
 
     if (action->rsc != NULL) {
         synapse_priority = action->rsc->priority;
     }
     if (action->priority > synapse_priority) {
         synapse_priority = action->priority;
     }
     if (synapse_priority > 0) {
         crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority);
     }
     return syn;
 }
 
 /*!
  * \internal
  * \brief Add an action to the transition graph XML if appropriate
  *
  * \param[in] action    Action to possibly add
  * \param[in] data_set  Cluster working set
  *
  * \note This will de-duplicate the action inputs, meaning that the
  *       pe_action_wrapper_t:type flags can no longer be relied on to retain
  *       their original settings. That means this MUST be called after
  *       pcmk__apply_orderings() is complete, and nothing after this should rely
  *       on those type flags. (For example, some code looks for type equal to
  *       some flag rather than whether the flag is set, and some code looks for
  *       particular combinations of flags -- such code must be done before
  *       pcmk__create_graph().)
  */
 void
 pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set)
 {
     xmlNode *syn = NULL;
     xmlNode *set = NULL;
     xmlNode *in = NULL;
 
     /* If we haven't already, de-duplicate inputs (even if we won't be adding
      * the action to the graph, so that crm_simulate's dot graphs don't have
      * duplicates).
      */
     if (!pcmk_is_set(action->flags, pe_action_dedup)) {
         pcmk__deduplicate_action_inputs(action);
         pe__set_action_flags(action, pe_action_dedup);
     }
 
     if (pcmk_is_set(action->flags, pe_action_dumped)    // Already added, or
         || !should_add_action_to_graph(action)) {       // shouldn't be added
         return;
     }
     pe__set_action_flags(action, pe_action_dumped);
 
     syn = create_graph_synapse(action, data_set);
     set = create_xml_node(syn, "action_set");
     in = create_xml_node(syn, "inputs");
 
     create_graph_action(set, action, false, data_set);
 
     for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
         pe_action_wrapper_t *input = (pe_action_wrapper_t *) lpc->data;
 
         if (should_add_input_to_graph(action, input)) {
             xmlNode *input_xml = create_xml_node(in, "trigger");
 
             input->state = pe_link_dumped;
             create_graph_action(input_xml, input->action, true, data_set);
         }
     }
 }
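 
 /* Editor's note (illustrative): given the element names used above, each
  * dumped action is assumed to end up shaped roughly like
  *
  *     <synapse id="3" priority="100">
  *       <action_set> ...action XML with details... </action_set>
  *       <inputs>
  *         <trigger> ...input action XML, details skipped... </trigger>
  *       </inputs>
  *     </synapse>
  *
  * with one <trigger> per input that should_add_input_to_graph() accepted.
  */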
 
 static int transition_id = -1;
 
 /*!
  * \internal
  * \brief Log a message after calculating a transition
  *
  * \param[in] filename  Where transition input is stored
  */
 void
 pcmk__log_transition_summary(const char *filename)
 {
     if (was_processing_error) {
         crm_err("Calculated transition %d (with errors)%s%s",
                 transition_id,
                 (filename == NULL)? "" : ", saving inputs in ",
                 (filename == NULL)? "" : filename);
 
     } else if (was_processing_warning) {
         crm_warn("Calculated transition %d (with warnings)%s%s",
                  transition_id,
                  (filename == NULL)? "" : ", saving inputs in ",
                  (filename == NULL)? "" : filename);
 
     } else {
         crm_notice("Calculated transition %d%s%s",
                    transition_id,
                    (filename == NULL)? "" : ", saving inputs in ",
                    (filename == NULL)? "" : filename);
     }
     if (crm_config_error) {
         crm_notice("Configuration errors found during scheduler processing,"
                    "  please run \"crm_verify -L\" to identify issues");
     }
 }
 
 /*!
  * \internal
  * \brief Create a transition graph with all cluster actions needed
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__create_graph(pe_working_set_t *data_set)
 {
     GList *iter = NULL;
     const char *value = NULL;
     long long limit = 0LL;
 
     transition_id++;
     crm_trace("Creating transition graph %d", transition_id);
 
     data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
 
     value = pe_pref(data_set->config_hash, "cluster-delay");
     crm_xml_add(data_set->graph, "cluster-delay", value);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     crm_xml_add(data_set->graph, "stonith-timeout", value);
 
     crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
 
     if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
         crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(data_set->graph, "failed-start-offset", "1");
     }
 
     value = pe_pref(data_set->config_hash, "batch-limit");
     crm_xml_add(data_set->graph, "batch-limit", value);
 
     crm_xml_add_int(data_set->graph, "transition_id", transition_id);
 
     value = pe_pref(data_set->config_hash, "migration-limit");
     if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
         crm_xml_add(data_set->graph, "migration-limit", value);
     }
 
     if (data_set->recheck_by > 0) {
         char *recheck_epoch = NULL;
 
         recheck_epoch = crm_strdup_printf("%llu",
                                           (long long) data_set->recheck_by);
         crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
         free(recheck_epoch);
     }
 
     /* The following code will de-duplicate action inputs, so nothing past this
      * should rely on the action input type flags retaining their original
      * values.
      */
 
     // Add resource actions to graph
     for (iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         pe_rsc_trace(rsc, "Processing actions for %s", rsc->id);
         rsc->cmds->expand(rsc, data_set);
     }
 
     // Add pseudo-action for list of nodes with maintenance state update
     add_maintenance_update(data_set);
 
     // Add non-resource (node) actions
     for (iter = data_set->actions; iter != NULL; iter = iter->next) {
         pe_action_t *action = (pe_action_t *) iter->data;
 
         if ((action->rsc != NULL)
             && (action->node != NULL)
             && action->node->details->shutdown
             && !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
             && !pcmk_any_flags_set(action->flags,
                                    pe_action_optional|pe_action_runnable)
             && pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)) {
             /* Eventually we should just ignore the 'fence' case, but for now
              * it's the best way to detect (in CTS) when CIB resource updates
              * are being lost.
              */
             if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
                 || (data_set->no_quorum_policy == no_quorum_ignore)) {
                 crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
                          action->node->details->unclean? "fence" : "shut down",
                          action->node->details->uname, action->rsc->id,
                          pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
                          pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
                          action->uuid);
             }
         }
 
         pcmk__add_action_to_graph(action, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "graph");
 }
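 
 /* Editor's sketch (attribute values illustrative, not from the patch):
  * assuming XML_TAG_GRAPH is "transition_graph", the resulting top-level
  * element would look roughly like
  *
  *     <transition_graph cluster-delay="60s" stonith-timeout="60s"
  *                       failed-stop-offset="INFINITY"
  *                       failed-start-offset="INFINITY" batch-limit="0"
  *                       transition_id="0">
  *       <synapse id="0"> ... </synapse>
  *     </transition_graph>
  */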
diff --git a/lib/pacemaker/pcmk_resource.c b/lib/pacemaker/pcmk_resource.c
index 2898a696e3..90739165a5 100644
--- a/lib/pacemaker/pcmk_resource.c
+++ b/lib/pacemaker/pcmk_resource.c
@@ -1,140 +1,138 @@
 /*
  * Copyright 2021-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <errno.h>
 #include <glib.h>
 #include <libxml/tree.h>
 
 #include <crm/common/mainloop.h>
 #include <crm/common/results.h>
 #include <crm/common/output_internal.h>
 #include <crm/pengine/internal.h>
 
 #include <pacemaker.h>
 #include <pacemaker-internal.h>
 
 // Search path for resource operation history (takes node name and resource ID)
 #define XPATH_OP_HISTORY "//" XML_CIB_TAG_STATUS                            \
                          "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
                          "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES      \
                          "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"
 
 static xmlNode *
 best_op(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
 {
     char *xpath = NULL;
     xmlNode *history = NULL;
     xmlNode *best = NULL;
 
     // Find node's resource history
     xpath = crm_strdup_printf(XPATH_OP_HISTORY, node->details->uname, rsc->id);
     history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
     free(xpath);
 
     // Examine each history entry
     for (xmlNode *lrm_rsc_op = first_named_child(history, XML_LRM_TAG_RSC_OP);
          lrm_rsc_op != NULL; lrm_rsc_op = crm_next_same_xml(lrm_rsc_op)) {
 
         const char *digest = crm_element_value(lrm_rsc_op,
                                                XML_LRM_ATTR_RESTART_DIGEST);
         guint interval_ms = 0;
 
         crm_element_value_ms(lrm_rsc_op, XML_LRM_ATTR_INTERVAL, &interval_ms);
 
         if (pcmk__ends_with(ID(lrm_rsc_op), "_last_failure_0")
             || (interval_ms != 0)) {
 
             // Only use last failure or recurring op if nothing else is available
             if (best == NULL) {
                 best = lrm_rsc_op;
             }
             continue;
         }
 
         best = lrm_rsc_op;
         if (digest != NULL) {
             // Any non-recurring action with a restart digest is sufficient
             break;
         }
     }
     return best;
 }
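 
 /* Editor's sketch: best_op() is a single-pass "best so far" scan -- remember
  * the first fallback-only entry, prefer any regular entry, and stop early
  * once a fully qualifying one (with a restart digest) appears. The same
  * pattern standalone, with made-up qualifiers:
  *
  *     #include <string.h>
  *
  *     // Entries starting with '~' are fallback-only; '!' fully qualifies
  *     static const char *pick(const char **items, int n) {
  *         const char *best = NULL;
  *
  *         for (int i = 0; i < n; i++) {
  *             if (items[i][0] == '~') {
  *                 if (best == NULL) {
  *                     best = items[i];    // keep, but keep looking
  *                 }
  *                 continue;
  *             }
  *             best = items[i];            // prefer any regular entry
  *             if (strchr(items[i], '!') != NULL) {
  *                 break;                  // fully qualifying: stop early
  *             }
  *         }
  *         return best;
  *     }
  */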
 
 /*!
  * \internal
  * \brief Calculate and output resource operation digests
  *
  * \param[in]  out        Output object
  * \param[in]  rsc        Resource to calculate digests for
  * \param[in]  node       Node whose operation history should be used
  * \param[in]  overrides  Hash table of configuration parameters to override
- * \param[in]  data_set   Cluster working set (with status)
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
-                       pe_node_t *node, GHashTable *overrides,
-                       pe_working_set_t *data_set)
+                       pe_node_t *node, GHashTable *overrides)
 {
     const char *task = NULL;
     xmlNode *xml_op = NULL;
     op_digest_cache_t *digests = NULL;
     guint interval_ms = 0;
     int rc = pcmk_rc_ok;
 
-    if ((out == NULL) || (rsc == NULL) || (node == NULL) || (data_set == NULL)) {
+    if ((out == NULL) || (rsc == NULL) || (node == NULL)) {
         return EINVAL;
     }
     if (rsc->variant != pe_native) {
         // Only primitives get operation digests
         return EOPNOTSUPP;
     }
 
     // Find XML of operation history to use
-    xml_op = best_op(rsc, node, data_set);
+    xml_op = best_op(rsc, node, rsc->cluster);
 
     // Generate an operation key
     if (xml_op != NULL) {
         task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     }
     if (task == NULL) { // Assume start if no history is available
         task = RSC_START;
         interval_ms = 0;
     }
 
     // Calculate and show digests
     digests = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
-                                    overrides, true, data_set);
+                                    overrides, true, rsc->cluster);
     rc = out->message(out, "digests", rsc, node, task, interval_ms, digests);
 
     pe__free_digests(digests);
     return rc;
 }
 
 int
 pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
                       pe_node_t *node, GHashTable *overrides,
                       pe_working_set_t *data_set)
 {
     pcmk__output_t *out = NULL;
     int rc = pcmk_rc_ok;
 
     rc = pcmk__xml_output_new(&out, xml);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
     pcmk__register_lib_messages(out);
-    rc = pcmk__resource_digests(out, rsc, node, overrides, data_set);
+    rc = pcmk__resource_digests(out, rsc, node, overrides);
     pcmk__xml_output_finish(out, xml);
     return rc;
 }
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index d3880094d9..7f98ef377a 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1761 +1,1761 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <sys/param.h>
 #include <glib.h>
 
 #include <crm/lrmd_internal.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 extern gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node,
                           gboolean optional, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Get the action flags relevant to ordering constraints
  *
  * \param[in] action  Action to check
  * \param[in] node    Node that *other* action in the ordering is on
  *                    (used only for clone resource actions)
  *
  * \return Action flags that should be used for orderings
  */
 static enum pe_action_flags
 action_flags_for_ordering(pe_action_t *action, pe_node_t *node)
 {
     bool runnable = false;
     enum pe_action_flags flags;
 
     // For non-resource actions, return the action flags
     if (action->rsc == NULL) {
         return action->flags;
     }
 
     /* For non-clone resources, or a clone action not assigned to a node,
      * return the flags as determined by the resource method without a node
      * specified.
      */
     flags = action->rsc->cmds->action_flags(action, NULL);
     if ((node == NULL) || !pe_rsc_is_clone(action->rsc)) {
         return flags;
     }
 
     /* Otherwise (i.e., for clone resource actions on a specific node), first
      * remember whether the non-node-specific action is runnable.
      */
     runnable = pcmk_is_set(flags, pe_action_runnable);
 
     // Then recheck the resource method with the node
     flags = action->rsc->cmds->action_flags(action, node);
 
     /* For clones in ordering constraints, the node-specific "runnable" doesn't
      * matter, just the non-node-specific setting (i.e., is the action runnable
      * anywhere).
      *
      * This applies only to runnable, and only for ordering constraints. This
      * function shouldn't be used for other types of constraints without
      * changes. Not very satisfying, but it's logical and appears to work well.
      */
     if (runnable && !pcmk_is_set(flags, pe_action_runnable)) {
         pe__set_raw_action_flags(flags, action->rsc->id,
                                  pe_action_runnable);
     }
     return flags;
 }
 
 /*!
  * \internal
  * \brief Get action UUID that should be used with a resource ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the UUID and resource of the first action in an
  * ordering, this returns the UUID of the action that should actually be used
  * for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
  *
  * \param[in] first_uuid    UUID of first action in ordering
  * \param[in] first_rsc     Resource of first action in ordering
  *
  * \return Newly allocated copy of UUID to use with ordering
  * \note It is the caller's responsibility to free the return value.
  */
 static char *
 action_uuid_for_ordering(const char *first_uuid, pe_resource_t *first_rsc)
 {
     guint interval_ms = 0;
     char *uuid = NULL;
     char *rid = NULL;
     char *first_task_str = NULL;
     enum action_tasks first_task = no_action;
     enum action_tasks remapped_task = no_action;
 
     // Only non-notify actions for collective resources need remapping
     if ((strstr(first_uuid, "notify") != NULL)
         || (first_rsc->variant < pe_group)) {
         goto done;
     }
 
     // Only non-recurring actions need remapping
     CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
     if (interval_ms > 0) {
         goto done;
     }
 
     first_task = text2task(first_task_str);
     switch (first_task) {
         case stop_rsc:
         case start_rsc:
         case action_notify:
         case action_promote:
         case action_demote:
             remapped_task = first_task + 1;
             break;
         case stopped_rsc:
         case started_rsc:
         case action_notified:
         case action_promoted:
         case action_demoted:
             remapped_task = first_task;
             break;
         case monitor_rsc:
         case shutdown_crm:
         case stonith_node:
             break;
         default:
             crm_err("Unknown action '%s' in ordering", first_task_str);
             break;
     }
 
     if (remapped_task != no_action) {
         /* If a (clone) resource has notifications enabled, we want to order
          * relative to when all notifications have been sent for the remapped
          * task. Only outermost resources or those in bundles have
          * notifications.
          */
         if (pcmk_is_set(first_rsc->flags, pe_rsc_notify)
             && ((first_rsc->parent == NULL)
                 || (pe_rsc_is_clone(first_rsc)
                     && (first_rsc->parent->variant == pe_container)))) {
             uuid = pcmk__notify_key(rid, "confirmed-post",
                                     task2text(remapped_task));
         } else {
             uuid = pcmk__op_key(rid, task2text(remapped_task), 0);
         }
         pe_rsc_trace(first_rsc,
                      "Remapped action UUID %s to %s for ordering purposes",
                      first_uuid, uuid);
     }
 
 done:
     if (uuid == NULL) {
         uuid = strdup(first_uuid);
         CRM_ASSERT(uuid != NULL);
     }
     free(first_task_str);
     free(rid);
     return uuid;
 }
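 
 /* Editor's note: "remapped_task = first_task + 1" above works only because
  * the action_tasks enum places each completion task immediately after its
  * base task. Standalone analogue of the trick (hypothetical enum):
  *
  *     enum task { T_STOP, T_STOPPED, T_START, T_STARTED };
  *
  *     static enum task completed_form(enum task t) {
  *         switch (t) {
  *             case T_STOP:
  *             case T_START:
  *                 return t + 1;   // valid only with this enum layout
  *             default:
  *                 return t;       // already a completion form
  *         }
  *     }
  */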
 
 /*!
  * \internal
  * \brief Get actual action that should be used with an ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the first action in an ordering, this returns
  * the action that should actually be used for ordering (for example, the
  * started action instead of the start action).
  *
  * \param[in] action  First action in an ordering
  *
  * \return Actual action that should be used for the ordering
  */
 static pe_action_t *
 action_for_ordering(pe_action_t *action)
 {
     pe_action_t *result = action;
     pe_resource_t *rsc = action->rsc;
 
     if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) {
         char *uuid = action_uuid_for_ordering(action->uuid, rsc);
 
         result = find_first_action(rsc->actions, uuid, NULL, NULL);
         if (result == NULL) {
             crm_warn("Not remapping %s to %s because %s does not have "
                      "remapped action", action->uuid, uuid, rsc->id);
             result = action;
         }
         free(uuid);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Update flags for ordering's actions appropriately for ordering's flags
  *
  * \param[in] first        First action in an ordering
  * \param[in] then         Then action in an ordering
  * \param[in] first_flags  Action flags for \p first for ordering purposes
  * \param[in] then_flags   Action flags for \p then for ordering purposes
  * \param[in] order        Action wrapper for \p first in ordering
  * \param[in] data_set     Cluster working set
  *
  * \return Mask of pe_graph_updated_first and/or pe_graph_updated_then
  */
 static enum pe_graph_flags
 update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
                                  enum pe_action_flags first_flags,
                                  enum pe_action_flags then_flags,
                                  pe_action_wrapper_t *order,
                                  pe_working_set_t *data_set)
 {
     enum pe_graph_flags changed = pe_graph_none;
 
     /* The node will only be used for clones. If interleaved, node will be NULL,
      * otherwise the ordering scope will be limited to the node. Normally, the
      * whole 'then' clone should restart if 'first' is restarted, so then->node
      * is needed.
      */
     pe_node_t *node = then->node;
 
     if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) {
         /* For unfencing, only instances of 'then' on the same node as 'first'
          * (the unfencing operation) should restart, so reset node to
          * first->node, at which point this case is handled like a normal
          * pe_order_implies_then.
          */
         pe__clear_order_flags(order->type, pe_order_implies_then_on_node);
         pe__set_order_flags(order->type, pe_order_implies_then);
         node = first->node;
         pe_rsc_trace(then->rsc,
                      "%s then %s: mapped pe_order_implies_then_on_node to "
                      "pe_order_implies_then on %s",
                      first->uuid, then->uuid, node->details->uname);
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_then)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags & pe_action_optional,
                                                        pe_action_optional,
                                                        pe_order_implies_then,
                                                        data_set);
         } else if (!pcmk_is_set(first_flags, pe_action_optional)
                    && pcmk_is_set(then->flags, pe_action_optional)) {
             pe__clear_action_flags(then, pe_action_optional);
             pe__set_graph_flags(changed, first, pe_graph_updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) {
         enum pe_action_flags restart = pe_action_optional|pe_action_runnable;
 
         changed |= then->rsc->cmds->update_actions(first, then, node,
                                                    first_flags, restart,
                                                    pe_order_restart, data_set);
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first)) {
         if (first->rsc != NULL) {
             changed |= first->rsc->cmds->update_actions(first, then, node,
                                                         first_flags,
                                                         pe_action_optional,
                                                         pe_order_implies_first,
                                                         data_set);
         } else if (!pcmk_is_set(first_flags, pe_action_optional)
                    && pcmk_is_set(first->flags, pe_action_runnable)) {
             pe__clear_action_flags(first, pe_action_runnable);
             pe__set_graph_flags(changed, first, pe_graph_updated_first);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags & pe_action_optional,
                                                        pe_action_optional,
                                                        pe_order_promoted_implies_first,
                                                        data_set);
         }
         pe_rsc_trace(then->rsc,
                      "%s then %s: %s after pe_order_promoted_implies_first",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_one_or_more)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_runnable,
                                                        pe_order_one_or_more,
                                                        data_set);
 
         } else if (pcmk_is_set(first_flags, pe_action_runnable)) {
             // We have another runnable instance of "first"
             then->runnable_before++;
 
             /* Mark "then" as runnable if it requires a certain number of
              * "before" instances to be runnable, and they now are.
              */
             if ((then->runnable_before >= then->required_runnable_before)
                 && !pcmk_is_set(then->flags, pe_action_runnable)) {
 
                 pe__set_action_flags(then, pe_action_runnable);
                 pe__set_graph_flags(changed, first, pe_graph_updated_then);
             }
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) {
         if (!pcmk_is_set(first_flags, pe_action_runnable)
             && (first->rsc->running_on != NULL)) {
 
             pe_rsc_trace(then->rsc,
                          "%s then %s: ignoring because first is stopping",
                          first->uuid, then->uuid);
             order->type = pe_order_none;
         } else {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_runnable,
                                                        pe_order_runnable_left,
                                                        data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_runnable_left)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_runnable,
                                                        pe_order_runnable_left,
                                                        data_set);
 
         } else if (!pcmk_is_set(first_flags, pe_action_runnable)
                    && pcmk_is_set(then->flags, pe_action_runnable)) {
 
             pe__clear_action_flags(then, pe_action_runnable);
             pe__set_graph_flags(changed, first, pe_graph_updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_optional,
                 pe_order_implies_first_migratable, data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after "
                      "pe_order_implies_first_migratable",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_pseudo_left)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_optional,
                                                        pe_order_pseudo_left,
                                                        data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_optional)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_runnable,
                                                        pe_order_optional,
                                                        data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_asymmetrical)) {
         if (then->rsc != NULL) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                                                        first_flags,
                                                        pe_action_runnable,
                                                        pe_order_asymmetrical,
                                                        data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(first->flags, pe_action_runnable)
         && pcmk_is_set(order->type, pe_order_implies_then_printed)
         && !pcmk_is_set(first_flags, pe_action_optional)) {
 
         pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
                      then->uuid, first->uuid);
         pe__set_action_flags(then, pe_action_print_always);
         // Don't bother marking 'then' as changed just for this
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first_printed)
         && !pcmk_is_set(then_flags, pe_action_optional)) {
 
         pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
                      first->uuid, then->uuid);
         pe__set_action_flags(first, pe_action_print_always);
         // Don't bother marking 'first' as changed just for this
     }
 
     if (pcmk_any_flags_set(order->type, pe_order_implies_then
                                         |pe_order_implies_first
                                         |pe_order_restart)
         && (first->rsc != NULL)
         && !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
         && pcmk_is_set(first->rsc->flags, pe_rsc_block)
         && !pcmk_is_set(first->flags, pe_action_runnable)
         && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)) {
 
         if (pcmk_is_set(then->flags, pe_action_runnable)) {
             pe__clear_action_flags(then, pe_action_runnable);
             pe__set_graph_flags(changed, first, pe_graph_updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first "
                      "is blocked, unmanaged, unrunnable stop",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     return changed;
 }
 
 // Convenience macros for logging action properties
 
 #define action_type_str(flags) \
     (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
 
 #define action_optional_str(flags) \
     (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
 
 #define action_runnable_str(flags) \
     (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
 
 #define action_node_str(a) \
     (((a)->node == NULL)? "no node" : (a)->node->details->uname)
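
 /* For example, for a blocked pseudo-action with no assigned node, these
  * yield "pseudo-action", "required", "unrunnable", and "no node", producing
  * a trace like "Updating pseudo-action <uuid> (required unrunnable) on
  * no node" (illustrative output).
  */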
 
 /*!
  * \internal
  * \brief Update an action's flags for all orderings where it is "then"
  *
  * \param[in] then      Action to update
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
 {
     GList *lpc = NULL;
     enum pe_graph_flags changed = pe_graph_none;
     int last_flags = then->flags;
 
     pe_rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
                  action_type_str(then->flags), then->uuid,
                  action_optional_str(then->flags),
                  action_runnable_str(then->flags), action_node_str(then));
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         /* Initialize current known "runnable before" actions. As
          * update_action_for_ordering_flags() is called for each of then's
          * before actions, this number will increment as runnable 'first'
          * actions are encountered.
          */
         then->runnable_before = 0;
 
         if (then->required_runnable_before == 0) {
             /* @COMPAT This ordering constraint uses the deprecated
              * "require-all=false" attribute. Treat it like "clone-min=1".
              */
             then->required_runnable_before = 1;
         }
 
         /* The pe_order_one_or_more clause of update_action_for_ordering_flags()
          * (called below) will reset runnable if appropriate.
          */
         pe__clear_action_flags(then, pe_action_runnable);
     }
 
     for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
         pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
         pe_action_t *first = other->action;
 
         pe_node_t *then_node = then->node;
         pe_node_t *first_node = first->node;
 
         if ((first->rsc != NULL)
             && (first->rsc->variant == pe_group)
             && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
 
             first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
             if (first_node != NULL) {
                 pe_rsc_trace(first->rsc, "Found node %s for 'first' %s",
                              first_node->details->uname, first->uuid);
             }
         }
 
         if ((then->rsc != NULL)
             && (then->rsc->variant == pe_group)
             && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
 
             then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
             if (then_node != NULL) {
                 pe_rsc_trace(then->rsc, "Found node %s for 'then' %s",
                              then_node->details->uname, then->uuid);
             }
         }
 
         // Disable constraint if it applies only when on the same node,
         // but these actions are on different nodes
         if (pcmk_is_set(other->type, pe_order_same_node)
             && (first_node != NULL) && (then_node != NULL)
             && (first_node->details != then_node->details)) {
 
             pe_rsc_trace(then->rsc,
                          "Disabled ordering %s on %s then %s on %s: not same node",
                          other->action->uuid, first_node->details->uname,
                          then->uuid, then_node->details->uname);
             other->type = pe_order_none;
             continue;
         }
 
         pe__clear_graph_flags(changed, then, pe_graph_updated_first);
 
         if ((first->rsc != NULL)
             && pcmk_is_set(other->type, pe_order_then_cancels_first)
             && !pcmk_is_set(then->flags, pe_action_optional)) {
 
             /* 'then' is required, so we must abandon 'first'
              * (e.g. a required stop cancels any agent reload).
              */
             pe__set_action_flags(other->action, pe_action_optional);
             if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) {
                 pe__clear_resource_flags(first->rsc, pe_rsc_reload);
             }
         }
 
         if ((first->rsc != NULL) && (then->rsc != NULL)
             && (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
             first = action_for_ordering(first);
         }
         if (first != other->action) {
             pe_rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
                          then->uuid, first->uuid, other->action->uuid);
         }
 
         pe_rsc_trace(then->rsc,
                      "%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
                      first->uuid, first->flags, then->uuid, then->flags,
                      other->type, action_node_str(first));
 
         if (first == other->action) {
             /* 'first' was not remapped (e.g. from 'start' to 'running'), which
              * could mean it is a non-resource action, a primitive resource
              * action, or already expanded.
              */
             enum pe_action_flags first_flags, then_flags;
 
             first_flags = action_flags_for_ordering(first, then_node);
             then_flags = action_flags_for_ordering(then, first_node);
 
             changed |= update_action_for_ordering_flags(first, then,
                                                         first_flags, then_flags,
                                                         other, data_set);
 
             /* 'first' was for a complex resource (clone, group, etc),
              * create a new dependency if necessary
              */
         } else if (order_actions(first, then, other->type)) {
             /* This was the first time 'first' and 'then' were associated,
              * start again to get the new actions_before list
              */
             pe__set_graph_flags(changed, then,
                                 pe_graph_updated_then|pe_graph_disable);
         }
 
         if (pcmk_is_set(changed, pe_graph_disable)) {
             pe_rsc_trace(then->rsc,
                          "Disabled ordering %s then %s in favor of %s then %s",
                          other->action->uuid, then->uuid, first->uuid,
                          then->uuid);
             pe__clear_graph_flags(changed, then, pe_graph_disable);
             other->type = pe_order_none;
         }
 
         if (pcmk_is_set(changed, pe_graph_updated_first)) {
             crm_trace("Re-processing %s and its 'after' actions "
                       "because it changed", first->uuid);
             for (GList *lpc2 = first->actions_after; lpc2 != NULL;
                  lpc2 = lpc2->next) {
                 pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
 
                 pcmk__update_action_for_orderings(other->action, data_set);
             }
             pcmk__update_action_for_orderings(first, data_set);
         }
     }
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         if (last_flags == then->flags) {
             pe__clear_graph_flags(changed, then, pe_graph_updated_then);
         } else {
             pe__set_graph_flags(changed, then, pe_graph_updated_then);
         }
     }
 
     if (pcmk_is_set(changed, pe_graph_updated_then)) {
         crm_trace("Re-processing %s and its 'after' actions because it changed",
                   then->uuid);
         if (pcmk_is_set(last_flags, pe_action_runnable)
             && !pcmk_is_set(then->flags, pe_action_runnable)) {
             pcmk__block_colocated_starts(then, data_set);
         }
         pcmk__update_action_for_orderings(then, data_set);
         for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
             pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
 
             pcmk__update_action_for_orderings(other->action, data_set);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Trace-log an action (optionally with its dependent actions)
  *
  * \param[in] pre_text  If not NULL, prefix the log with this plus ": "
  * \param[in] action    Action to log
  * \param[in] details   If true, recursively log dependent actions
  */
 void
 pcmk__log_action(const char *pre_text, pe_action_t *action, bool details)
 {
     const char *node_uname = NULL;
     const char *node_uuid = NULL;
     const char *desc = NULL;
 
     CRM_CHECK(action != NULL, return);
 
     if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
         if (action->node != NULL) {
             node_uname = action->node->details->uname;
             node_uuid = action->node->details->id;
         } else {
             node_uname = "<none>";
         }
     }
 
     switch (text2task(action->task)) {
         case stonith_node:
         case shutdown_crm:
             if (pcmk_is_set(action->flags, pe_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (pcmk_is_set(action->flags, pe_action_optional)) {
                 desc = "Optional ";
             } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else if (pcmk_is_set(action->flags, pe_action_processed)) {
                 desc = "";
             } else {
                 desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
         default:
             if (pcmk_is_set(action->flags, pe_action_optional)) {
                 desc = "Optional ";
             } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else if (pcmk_is_set(action->flags, pe_action_processed)) {
                 desc = "";
             } else {
                 desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (action->rsc? action->rsc->id : "<none>"),
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
     }
 
     if (details) {
         GList *iter = NULL;
 
         crm_trace("\t\t====== Preceding Actions");
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data;
 
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== Subsequent Actions");
         for (iter = action->actions_after; iter != NULL; iter = iter->next) {
             pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data;
 
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== End");
 
     } else {
         crm_trace("\t\t(before=%d, after=%d)",
                   g_list_length(action->actions_before),
                   g_list_length(action->actions_after));
     }
 }
 
 /*!
  * \internal
  * \brief Create a new pseudo-action for a resource
  *
  * \param[in] rsc       Resource to create action for
  * \param[in] task      Action name
  * \param[in] optional  Whether action should be considered optional
  * \param[in] runnable  Whether action should be considered runnable
  *
  * \return New action object corresponding to arguments
  */
 pe_action_t *
 pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
                             bool optional, bool runnable)
 {
     pe_action_t *action = NULL;
 
     CRM_ASSERT((rsc != NULL) && (task != NULL));
 
     action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
                            optional, TRUE, rsc->cluster);
     pe__set_action_flags(action, pe_action_pseudo);
     if (runnable) {
         pe__set_action_flags(action, pe_action_runnable);
     }
     return action;
 }
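
 /* Usage sketch (hypothetical caller; real callers choose the task name):
  *
  *     pe_action_t *pseudo = pcmk__new_rsc_pseudo_action(rsc, RSC_START,
  *                                                       false, true);
  *
  * This creates a required, runnable pseudo-start: custom_action() handles
  * the optional/required choice, then pe_action_pseudo (and, if requested,
  * pe_action_runnable) is set on top.
  */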
 
 /*!
  * \internal
  * \brief Create an executor cancel action
  *
  * \param[in] rsc          Resource of action to cancel
  * \param[in] task         Name of action to cancel
  * \param[in] interval_ms  Interval of action to cancel
  * \param[in] node         Node of action to cancel
  *
  * \return Newly created cancel action
  */
 pe_action_t *
 pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
                         pe_node_t *node)
 {
     pe_action_t *cancel_op = NULL;
     char *key = NULL;
     char *interval_ms_s = NULL;
 
     CRM_ASSERT((rsc != NULL) && (task != NULL) && (node != NULL));
 
     // @TODO dangerous if possible to schedule another action with this key
     key = pcmk__op_key(rsc->id, task, interval_ms);
 
     cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE,
                               rsc->cluster);
 
     pcmk__str_update(&cancel_op->task, RSC_CANCEL);
     pcmk__str_update(&cancel_op->cancel_task, task);
 
     interval_ms_s = crm_strdup_printf("%u", interval_ms);
     add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task);
     add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s);
     free(interval_ms_s);
 
     return cancel_op;
 }
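
 /* Usage sketch: schedule_cancel() later in this file wraps this function to
  * cancel a recurring monitor recorded in history, roughly:
  *
  *     pe_action_t *cancel = pcmk__new_cancel_action(rsc, RSC_STATUS, 10000,
  *                                                   node);
  *     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
  */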
 
 /*!
  * \internal
  * \brief Create a new shutdown action for a node
  *
  * \param[in] node         Node being shut down
- * \param[in] data_set     Working set of cluster
  *
  * \return Newly created shutdown action for \p node
  */
 pe_action_t *
-pcmk__new_shutdown_action(pe_node_t *node, pe_working_set_t *data_set)
+pcmk__new_shutdown_action(pe_node_t *node)
 {
     char *shutdown_id = NULL;
     pe_action_t *shutdown_op = NULL;
 
-    CRM_ASSERT((node != NULL) && (data_set != NULL));
+    CRM_ASSERT(node != NULL);
 
     shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN,
                                     node->details->uname);
 
     shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE,
-                                TRUE, data_set);
+                                TRUE, node->details->data_set);
 
-    pcmk__order_stops_before_shutdown(node, shutdown_op, data_set);
+    pcmk__order_stops_before_shutdown(node, shutdown_op,
+                                      node->details->data_set);
     add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
     return shutdown_op;
 }
 
 /*!
  * \internal
  * \brief Calculate and add an operation digest to XML
  *
  * Calculate an operation digest, which enables us to later determine when a
  * restart is needed due to the resource's parameters being changed, and add it
  * to given XML.
  *
  * \param[in] op       Operation result from executor
  * \param[in] update   XML to add digest to
  */
 static void
 add_op_digest_to_xml(lrmd_event_data_t *op, xmlNode *update)
 {
     char *digest = NULL;
     xmlNode *args_xml = NULL;
 
     if (op->params == NULL) {
         return;
     }
     args_xml = create_xml_node(NULL, XML_TAG_PARAMS);
     g_hash_table_foreach(op->params, hash2field, args_xml);
     pcmk__filter_op_for_digest(args_xml);
     digest = calculate_operation_digest(args_xml, NULL);
     crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest);
     free_xml(args_xml);
     free(digest);
 }
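
 /* For example (illustrative values), if op->params maps "ip" to
  * "192.168.122.10", those parameters are serialized under a params element,
  * filtered, and hashed, and the digest is attached to the update via
  * XML_LRM_ATTR_OP_DIGEST so later runs can detect parameter changes.
  */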
 
 #define FAKE_TE_ID     "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
 
 /*!
  * \internal
  * \brief Create XML for resource operation history update
  *
  * \param[in,out] parent          Parent XML node to add to
  * \param[in,out] op              Operation event data
  * \param[in]     caller_version  DC feature set
  * \param[in]     target_rc       Expected result of operation
  * \param[in]     node            Name of node on which operation was performed
  * \param[in]     origin          Arbitrary description of update source
  *
  * \return Newly created XML node for history update
  */
 xmlNode *
 pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
                          const char *caller_version, int target_rc,
                          const char *node, const char *origin)
 {
     char *key = NULL;
     char *magic = NULL;
     char *op_id = NULL;
     char *op_id_additional = NULL;
     char *local_user_data = NULL;
     const char *exit_reason = NULL;
 
     xmlNode *xml_op = NULL;
     const char *task = NULL;
 
     CRM_CHECK(op != NULL, return NULL);
     crm_trace("Creating history XML for %s-interval %s action for %s on %s "
               "(DC version: %s, origin: %s)",
               pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
               ((node == NULL)? "no node" : node), caller_version, origin);
 
     task = op->op_type;
 
     /* Record a successful agent reload as a start, and a failed one as a
      * monitor, to make life easier for the scheduler when determining the
      * current state.
      *
      * @COMPAT We should check "reload" here only if the operation was for a
      * pre-OCF-1.1 resource agent, but we don't know that here, and we should
      * only ever get results for actions scheduled by us, so we can reasonably
      * assume any "reload" is actually a pre-1.1 agent reload.
      */
     if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
                          NULL)) {
         if (op->op_status == PCMK_EXEC_DONE) {
             task = CRMD_ACTION_START;
         } else {
             task = CRMD_ACTION_STATUS;
         }
     }
 
     key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
     if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) {
         const char *n_type = crm_meta_value(op->params, "notify_type");
         const char *n_task = crm_meta_value(op->params, "notify_operation");
 
         CRM_LOG_ASSERT(n_type != NULL);
         CRM_LOG_ASSERT(n_task != NULL);
         op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
 
         if (op->op_status != PCMK_EXEC_PENDING) {
             /* Ignore notify errors.
              *
              * @TODO It might be better to keep the correct result here, and
              * ignore it in process_graph_event().
              */
             lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         }
 
     /* Migration history is preserved separately: it can span multiple nodes,
      * and future cluster transitions need it intact.
      */
     } else if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE,
                                 CRMD_ACTION_MIGRATED, NULL)) {
         op_id = strdup(key);
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
         if (op->interval_ms == 0) {
             // Ensure 'last' gets updated, in case record-pending is true
             op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
         }
         exit_reason = op->exit_reason;
 
     } else if (op->interval_ms > 0) {
         op_id = strdup(key);
 
     } else {
         op_id = pcmk__op_key(op->rsc_id, "last", 0);
     }
 
   again:
     xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id);
     if (xml_op == NULL) {
         xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP);
     }
 
     if (op->user_data == NULL) {
         crm_debug("Generating fake transition key for: " PCMK__OP_FMT
                   " %d from %s", op->rsc_id, op->op_type, op->interval_ms,
                   op->call_id, origin);
         local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
                                                FAKE_TE_ID);
         op->user_data = local_user_data;
     }
 
     if (magic == NULL) {
         magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
                                   (const char *) op->user_data);
     }
 
     crm_xml_add(xml_op, XML_ATTR_ID, op_id);
     crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key);
     crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task);
     crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin);
     crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
     crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
     crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
     crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, pcmk__s(exit_reason, ""));
     crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */
 
     crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
     crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
     crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status);
     crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms);
 
     if (compare_version("2.1", caller_version) <= 0) {
         if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) {
             crm_trace("Timing data (" PCMK__OP_FMT
                       "): last=%u change=%u exec=%u queue=%u",
                       op->rsc_id, op->op_type, op->interval_ms,
                       op->t_run, op->t_rcchange, op->exec_time, op->queue_time);
 
             if ((op->interval_ms != 0) && (op->t_rcchange != 0)) {
                 // Recurring ops may have changed rc after initial run
                 crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
                                (long long) op->t_rcchange);
             } else {
                 crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
                                (long long) op->t_run);
             }
 
             crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time);
             crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time);
         }
     }
 
     if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
                          NULL)) {
         // Always record migrate_source and migrate_target for migrate ops
         const char *name = XML_LRM_ATTR_MIGRATE_SOURCE;
 
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
 
         name = XML_LRM_ATTR_MIGRATE_TARGET;
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
     }
 
     add_op_digest_to_xml(op, xml_op);
 
     if (op_id_additional) {
         free(op_id);
         op_id = op_id_additional;
         op_id_additional = NULL;
         goto again;
     }
 
     if (local_user_data) {
         free(local_user_data);
         op->user_data = NULL;
     }
     free(magic);
     free(op_id);
     free(key);
     return xml_op;
 }
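
 /* A resulting history entry looks roughly like this (attribute names behind
  * the XML_* constants are assumptions here; values illustrative):
  *
  *   <lrm_rsc_op id="myrsc_last_0" operation_key="myrsc_start_0"
  *               operation="start" call-id="5" rc-code="0" op-status="0"
  *               interval="0" ... />
  */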
 
 /*!
  * \internal
  * \brief Check whether an action shutdown-locks a resource to a node
  *
  * If the shutdown-lock cluster property is set, resources will not be recovered
  * on a different node if cleanly stopped, and may start only on that same node.
  * This function checks whether that applies to a given action, so that the
  * transition graph can be marked appropriately.
  *
  * \param[in] action  Action to check
  *
  * \return true if \p action locks its resource to the action's node,
  *         otherwise false
  */
 bool
 pcmk__action_locks_rsc_to_node(const pe_action_t *action)
 {
     // Only resource actions taking place on resource's lock node are locked
     if ((action == NULL) || (action->rsc == NULL)
         || (action->rsc->lock_node == NULL) || (action->node == NULL)
         || (action->node->details != action->rsc->lock_node->details)) {
         return false;
     }
 
     /* During shutdown, only stops are locked (otherwise, another action such as
      * a demote would cause the controller to clear the lock)
      */
     if (action->node->details->shutdown && (action->task != NULL)
         && (strcmp(action->task, RSC_STOP) != 0)) {
         return false;
     }
 
     return true;
 }
 
 /* lowest to highest */
 static gint
 sort_action_id(gconstpointer a, gconstpointer b)
 {
     const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
     const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
     if (action_wrapper1->action->id < action_wrapper2->action->id) {
         return 1;
     }
     if (action_wrapper1->action->id > action_wrapper2->action->id) {
         return -1;
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Remove any duplicate action inputs, merging action flags
  *
  * \param[in] action  Action whose inputs should be checked
  */
 void
 pcmk__deduplicate_action_inputs(pe_action_t *action)
 {
     GList *item = NULL;
     GList *next = NULL;
     pe_action_wrapper_t *last_input = NULL;
 
     action->actions_before = g_list_sort(action->actions_before,
                                          sort_action_id);
     for (item = action->actions_before; item != NULL; item = next) {
         pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
 
         next = item->next;
         if ((last_input != NULL)
             && (input->action->id == last_input->action->id)) {
             crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
                       input->action->uuid, input->action->id,
                       action->uuid, action->id);
 
             /* For the purposes of scheduling, the ordering flags no longer
              * matter, but crm_simulate looks at certain ones when creating a
              * dot graph. Combining the flags is sufficient for that purpose.
              */
             last_input->type |= input->type;
             if (input->state == pe_link_dumped) {
                 last_input->state = pe_link_dumped;
             }
 
             free(item->data);
             action->actions_before = g_list_delete_link(action->actions_before,
                                                         item);
         } else {
             last_input = input;
             input->state = pe_link_not_dumped;
         }
     }
 }
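
 /* For example, if an action's sorted inputs have action IDs 3, 5, 5, 8, the
  * second input with ID 5 is dropped, its ordering flags are merged into the
  * surviving entry, and the input list becomes 3, 5, 8.
  */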
 
 /*!
  * \internal
  * \brief Output all scheduled actions
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__output_actions(pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
 
     // Output node (non-resource) actions
     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
         char *node_name = NULL;
         char *task = NULL;
         pe_action_t *action = (pe_action_t *) iter->data;
 
         if (action->rsc != NULL) {
             continue; // Resource actions will be output later
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue; // This action was not scheduled
         }
 
         if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
             task = strdup("Shutdown");
 
         } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
             const char *op = g_hash_table_lookup(action->meta, "stonith_action");
 
             task = crm_strdup_printf("Fence (%s)", op);
 
         } else {
             continue; // Don't display other node action types
         }
 
         if (pe__is_guest_node(action->node)) {
             node_name = crm_strdup_printf("%s (resource: %s)",
                                           action->node->details->uname,
                                           action->node->details->remote_rsc->container->id);
         } else if (action->node != NULL) {
             node_name = crm_strdup_printf("%s", action->node->details->uname);
         }
 
         out->message(out, "node-action", task, node_name, action->reason);
 
         free(node_name);
         free(task);
     }
 
     // Output resource actions
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->output_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule cancellation of a recurring action
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] call_id      Action's call ID from history
  * \param[in] task         Action name
  * \param[in] interval_ms  Action interval
  * \param[in] node         Node that history entry is for
  * \param[in] reason       Short description of why action is being cancelled
  */
 static void
 schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
                 guint interval_ms, pe_node_t *node, const char *reason)
 {
     pe_action_t *cancel = NULL;
 
     CRM_CHECK((rsc != NULL) && (task != NULL)
               && (node != NULL) && (reason != NULL),
               return);
 
     crm_info("Recurring %s-interval %s for %s will be stopped on %s: %s",
              pcmk__readable_interval(interval_ms), task, rsc->id,
              pcmk__s(node->details->uname, "unknown node"), reason);
     cancel = pcmk__new_cancel_action(rsc, task, interval_ms, node);
     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
 
     // Cancellations happen after stops
     pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel,
                        pe_order_optional, rsc->cluster);
 }
 
 /*!
  * \internal
  * \brief Check whether action from resource history is still in configuration
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] task         Action's name
  * \param[in] interval_ms  Action's interval (in milliseconds)
  *
  * \return true if action is still in resource configuration, otherwise false
  */
 static bool
 action_in_config(pe_resource_t *rsc, const char *task, guint interval_ms)
 {
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
     bool config = (find_rsc_op_entry(rsc, key) != NULL);
 
     free(key);
     return config;
 }
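
 /* For example, action_in_config(rsc, "monitor", 10000) looks for an
  * operation entry matching a key of roughly "<rsc-id>_monitor_10000" in the
  * resource's configuration (key format per pcmk__op_key()).
  */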
 
 /*!
  * \internal
  * \brief Get action name needed to compare digest for configuration changes
  *
  * \param[in] task         Action name from history
  * \param[in] interval_ms  Action interval (in milliseconds)
  *
  * \return Action name whose digest should be compared
  */
 static const char *
 task_for_digest(const char *task, guint interval_ms)
 {
     /* Certain actions need to be compared against the parameters used to start
      * the resource.
      */
     if ((interval_ms == 0)
         && pcmk__str_any_of(task, RSC_STATUS, RSC_MIGRATED, RSC_PROMOTE, NULL)) {
         task = RSC_START;
     }
     return task;
 }
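
 /* For example, task_for_digest(RSC_PROMOTE, 0) returns RSC_START, because a
  * non-recurring promote is compared against the start parameters, while
  * task_for_digest(RSC_STATUS, 10000) returns the recurring monitor's own
  * name unchanged.
  */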
 
 /*!
  * \internal
  * \brief Check whether only sanitized parameters to an action changed
  *
  * When collecting CIB files for troubleshooting, crm_report will mask
  * sensitive resource parameters. If simulations were run using that, affected
  * resources would appear to need a restart, which would complicate
  * troubleshooting. To avoid that, we save a "secure digest" of non-sensitive
  * parameters. This function uses that digest to check whether only masked
  * parameters are different.
  *
  * \param[in] xml_op       Resource history entry with secure digest
  * \param[in] digest_data  Operation digest information being compared
  * \param[in] data_set     Cluster working set
  *
  * \return true if only sanitized parameters changed, otherwise false
  */
 static bool
 only_sanitized_changed(xmlNode *xml_op, const op_digest_cache_t *digest_data,
                        pe_working_set_t *data_set)
 {
     const char *digest_secure = NULL;
 
     if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
         // The scheduler is not being run as a simulation
         return false;
     }
 
     digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
 
     return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL)
            && (digest_data->digest_secure_calc != NULL)
            && (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
 }
 
 /*!
  * \internal
  * \brief Force a restart due to a configuration change
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] task         Name of action whose configuration changed
  * \param[in] interval_ms  Action interval (in milliseconds)
  * \param[in] node         Node where resource should be restarted
  */
 static void
 force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
               pe_node_t *node)
 {
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
     pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
                                           rsc->cluster);
 
     pe_action_set_reason(required, "resource definition change", true);
     trigger_unfencing(rsc, node, "Device parameters changed", NULL,
                       rsc->cluster);
 }
 
 /*!
  * \internal
  * \brief Reschedule a recurring action
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] task         Name of action being rescheduled
  * \param[in] interval_ms  Action interval (in milliseconds)
  * \param[in] node         Node where action should be rescheduled
  */
 static void
 reschedule_recurring(pe_resource_t *rsc, const char *task, guint interval_ms,
                      pe_node_t *node)
 {
     pe_action_t *op = NULL;
 
     trigger_unfencing(rsc, node, "Device parameters changed (reschedule)",
                       NULL, rsc->cluster);
     op = custom_action(rsc, pcmk__op_key(rsc->id, task, interval_ms),
                        task, node, TRUE, TRUE, rsc->cluster);
     pe__set_action_flags(op, pe_action_reschedule);
 }
 
 /*!
  * \internal
  * \brief Schedule a reload of a resource on a node
  *
  * \param[in] rsc   Resource to reload
  * \param[in] node  Where resource should be reloaded
  */
 static void
 schedule_reload(pe_resource_t *rsc, pe_node_t *node)
 {
     pe_action_t *reload = NULL;
 
     // For collective resources, just call recursively for children
     if (rsc->variant > pe_native) {
         g_list_foreach(rsc->children, (GFunc) schedule_reload, node);
         return;
     }
 
     // Skip the reload in certain situations
     if ((node == NULL)
         || !pcmk_is_set(rsc->flags, pe_rsc_managed)
         || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s",
                      rsc->id,
                      pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged",
                      pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "",
                      (node == NULL)? "inactive" : node->details->uname);
         return;
     }
 
     /* If a resource's configuration changed while a start was pending,
      * force a full restart instead of a reload.
      */
     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
         pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
                      rsc->id);
         custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
                       rsc->cluster);
         return;
     }
 
     // Schedule the reload
     pe__set_resource_flags(rsc, pe_rsc_reload);
     reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
                            FALSE, TRUE, rsc->cluster);
     pe_action_set_reason(reload, "resource definition change", FALSE);
 
     // Set orderings so that a required stop or demote cancels the reload
     pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        rsc->cluster);
     pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        rsc->cluster);
 }
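
 /* Note: pe_order_then_cancels_first is honored in
  * pcmk__update_action_for_orderings() earlier in this file, which marks the
  * reload optional (abandoning it) once the stop or demote turns out to be
  * required.
  */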
 
 /*!
  * \internal
  * \brief Handle any configuration change for an action
  *
  * Given an action from resource history, if the resource's configuration
  * changed since the action was done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, etc.).
  *
  * \param[in] rsc     Resource that action is for
  * \param[in] node    Node that action was on
  * \param[in] xml_op  Action XML from resource history
  *
  * \return true if action configuration changed, otherwise false
  */
 bool
 pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
 {
     guint interval_ms = 0;
     const char *task = NULL;
     const op_digest_cache_t *digest_data = NULL;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
               return false);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     CRM_CHECK(task != NULL, return false);
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
     // If this is a recurring action, check whether it has been orphaned
     if (interval_ms > 0) {
         if (action_in_config(rsc, task, interval_ms)) {
             pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration",
                          pcmk__readable_interval(interval_ms), task, rsc->id,
                          node->details->uname);
         } else if (pcmk_is_set(rsc->cluster->flags,
                                pe_flag_stop_action_orphans)) {
             schedule_cancel(rsc,
                             crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                             task, interval_ms, node, "orphan");
             return true;
         } else {
             pe_rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
                          pcmk__readable_interval(interval_ms), task, rsc->id,
                          node->details->uname);
             return true;
         }
     }
 
     crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
               pcmk__readable_interval(interval_ms), task, rsc->id,
               node->details->uname);
     task = task_for_digest(task, interval_ms);
     digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster);
 
     if (only_sanitized_changed(xml_op, digest_data, rsc->cluster)) {
         if (!pcmk__is_daemon && (rsc->cluster->priv != NULL)) {
             pcmk__output_t *out = rsc->cluster->priv;
 
             out->info(out,
                       "Only 'private' parameters to %s-interval %s for %s "
                       "on %s changed: %s",
                       pcmk__readable_interval(interval_ms), task, rsc->id,
                       node->details->uname,
                       crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         }
         return false;
     }
 
     switch (digest_data->rc) {
         case RSC_DIGEST_RESTART:
             crm_log_xml_debug(digest_data->params_restart, "params:restart");
             force_restart(rsc, task, interval_ms, node);
             return true;
 
         case RSC_DIGEST_ALL:
         case RSC_DIGEST_UNKNOWN:
             // Changes that can potentially be handled by an agent reload
 
             if (interval_ms > 0) {
                 /* Recurring actions aren't reloaded per se; they are simply
                  * rescheduled so that the next run uses the new parameters.
                  * The old instance will be cancelled automatically.
                  */
                 crm_log_xml_debug(digest_data->params_all, "params:reschedule");
                 reschedule_recurring(rsc, task, interval_ms, node);
 
             } else if (crm_element_value(xml_op,
                                          XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
                 // Agent supports reload, so use it
                 trigger_unfencing(rsc, node,
                                   "Device parameters changed (reload)", NULL,
                                   rsc->cluster);
                 crm_log_xml_debug(digest_data->params_all, "params:reload");
                 schedule_reload(rsc, node);
 
             } else {
                 pe_rsc_trace(rsc,
                              "Restarting %s because agent doesn't support reload",
                              rsc->id);
                 crm_log_xml_debug(digest_data->params_restart,
                                   "params:restart");
                 force_restart(rsc, task, interval_ms, node);
             }
             return true;
 
         default:
             break;
     }
     return false;
 }
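
 /* Outcome summary (a sketch of the logic above): RSC_DIGEST_RESTART forces a
  * full restart; RSC_DIGEST_ALL and RSC_DIGEST_UNKNOWN reschedule a recurring
  * action, schedule an agent reload if the history advertises one, or fall
  * back to a restart; a matching digest (or only sanitized changes) schedules
  * nothing.
  */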
 
 /*!
  * \internal
  * \brief Create a list of a resource's action history entries, sorted by call ID
  *
  * \param[in]  rsc          Resource whose history should be checked
  * \param[in]  rsc_entry    Resource's <lrm_rsc_op> status XML
  * \param[out] start_index  Where to store index of start-like action, if any
  * \param[out] stop_index   Where to store index of stop action, if any
  *
  * \return Resource's action history entries, sorted by call ID
  */
 static GList *
 rsc_history_as_list(pe_resource_t *rsc, xmlNode *rsc_entry,
                     int *start_index, int *stop_index)
 {
     GList *ops = NULL;
 
     for (xmlNode *rsc_op = first_named_child(rsc_entry, XML_LRM_TAG_RSC_OP);
          rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
         ops = g_list_prepend(ops, rsc_op);
     }
     ops = g_list_sort(ops, sort_op_by_callid);
     calculate_active_ops(ops, start_index, stop_index);
     return ops;
 }
 
 /*!
  * \internal
  * \brief Process a resource's action history from the CIB status
  *
  * Given a resource's action history, if the resource's configuration
  * changed since the actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in] rsc_entry  Resource's <lrm_rsc_op> status XML
  * \param[in] rsc        Resource whose history is being processed
  * \param[in] node       Node whose history is being processed
  */
 static void
 process_rsc_history(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node)
 {
     int offset = -1;
     int stop_index = 0;
     int start_index = 0;
     GList *sorted_op_list = NULL;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         if (pe_rsc_is_anon_clone(uber_parent(rsc))) {
             pe_rsc_trace(rsc,
                          "Skipping configuration check "
                          "for orphaned clone instance %s",
                          rsc->id);
         } else {
             pe_rsc_trace(rsc,
                          "Skipping configuration check and scheduling clean-up "
                          "for orphaned resource %s", rsc->id);
             DeleteRsc(rsc, node, FALSE, rsc->cluster);
         }
         return;
     }
 
     if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
             DeleteRsc(rsc, node, FALSE, rsc->cluster);
         }
         pe_rsc_trace(rsc,
                      "Skipping configuration check for %s "
                      "because no longer active on %s",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_trace(rsc, "Checking for configuration changes for %s on %s",
                  rsc->id, node->details->uname);
 
     if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
         DeleteRsc(rsc, node, FALSE, rsc->cluster);
     }
 
     sorted_op_list = rsc_history_as_list(rsc, rsc_entry, &start_index,
                                          &stop_index);
     if (start_index < stop_index) {
         return; // Resource is stopped
     }
 
     for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
         xmlNode *rsc_op = (xmlNode *) iter->data;
         const char *task = NULL;
         guint interval_ms = 0;
 
         if (++offset < start_index) {
             // Skip actions that happened before a start
             continue;
         }
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
         if ((interval_ms > 0)
             && (pcmk_is_set(rsc->flags, pe_rsc_maintenance)
                 || node->details->maintenance)) {
             // Maintenance mode cancels recurring operations
             schedule_cancel(rsc,
                             crm_element_value(rsc_op, XML_LRM_ATTR_CALLID),
                             task, interval_ms, node, "maintenance mode");
 
         } else if ((interval_ms > 0)
                    || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
                                            RSC_PROMOTE, RSC_MIGRATED, NULL)) {
             /* If a resource operation failed and the operation's definition
              * has changed, clear any fail count so it can be retried fresh.
              */
 
             if (pe__bundle_needs_remote_name(rsc, rsc->cluster)) {
                 /* We haven't allocated resources to nodes yet, so if the
                  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
                  * based on the literal "#uname" value rather than the properly
                  * substituted value. That would mistakenly make the action
                  * definition appear to have been changed. Defer the check until
                  * later in this case.
                  */
                 pe__add_param_check(rsc_op, rsc, node, pe_check_active,
                                     rsc->cluster);
 
             } else if (pcmk__check_action_config(rsc, node, rsc_op)
                        && (pe_get_failcount(node, rsc, NULL, pe_fc_effective,
                                             NULL, rsc->cluster) != 0)) {
                 pe__clear_failcount(rsc, node, "action definition changed",
                                     rsc->cluster);
             }
         }
     }
     g_list_free(sorted_op_list);
 }
 
 /*!
  * \internal
  * \brief Process a node's action history from the CIB status
  *
  * Given a node's resource history, if the resource's configuration changed
  * since the actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in] node      Node whose history is being processed
  * \param[in] lrm_rscs  Node's <lrm_resources> from CIB status XML
  * \param[in] data_set  Cluster working set
  */
 static void
 process_node_history(pe_node_t *node, xmlNode *lrm_rscs, pe_working_set_t *data_set)
 {
     crm_trace("Processing history for node %s", node->details->uname);
     for (xmlNode *rsc_entry = first_named_child(lrm_rscs, XML_LRM_TAG_RESOURCE);
          rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
 
         if (xml_has_children(rsc_entry)) {
             GList *result = pcmk__rscs_matching_id(ID(rsc_entry), data_set);
 
             for (GList *iter = result; iter != NULL; iter = iter->next) {
                 pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
                 if (rsc->variant == pe_native) {
                     process_rsc_history(rsc_entry, rsc, node);
                 }
             }
             g_list_free(result);
         }
     }
 }
 
 // XPath to find a node's resource history
 #define XPATH_NODE_HISTORY "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS             \
                            "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
                            "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES
 
 /*!
  * \internal
  * \brief Process any resource configuration changes in the CIB status
  *
  * Go through all nodes' resource history, and if a resource's configuration
  * changed since its actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
 {
     crm_trace("Check resource and action configuration for changes");
 
     /* Rather than iterate through the status section, iterate through the nodes
      * and search for the appropriate status subsection for each. This skips
      * orphaned nodes and lets us eliminate some cases before searching the XML.
      */
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         /* Don't bother checking actions for a node that can't run actions ...
          * unless it's in maintenance mode, in which case we still need to
          * cancel any existing recurring monitors.
          */
         if (node->details->maintenance || pcmk__node_available(node)) {
             char *xpath = NULL;
             xmlNode *history = NULL;
 
             xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
             history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
             free(xpath);
 
             process_node_history(node, history, data_set);
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index a8d2c01554..85df6ace80 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,797 +1,797 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 CRM_TRACE_INIT_DATA(pacemaker);
 
 /*!
  * \internal
  * \brief Do deferred action checks after allocation
  *
  * When unpacking the resource history, the scheduler checks for resource
  * configurations that have changed since an action was run. However, at that
  * time, bundles using the REMOTE_CONTAINER_HACK don't have their final
  * parameter information, so instead they add a deferred check to a list. This
  * function processes one entry in that list.
  *
  * \param[in] rsc       Resource that action history is for
  * \param[in] node      Node that action history is for
  * \param[in] rsc_op    Action history entry
  * \param[in] check     Type of deferred check to do
  * \param[in] data_set  Working set for cluster
  */
 static void
 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
              enum pe_check_parameters check, pe_working_set_t *data_set)
 {
     const char *reason = NULL;
     op_digest_cache_t *digest_data = NULL;
 
     switch (check) {
         case pe_check_active:
             if (pcmk__check_action_config(rsc, node, rsc_op)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
                 reason = "action definition changed";
             }
             break;
 
         case pe_check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s has "
                               "no digest to compare",
                               rsc->id, ID(rsc_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
     if (reason != NULL) {
         pe__clear_failcount(rsc, node, reason, data_set);
     }
 }
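 
 /* For example: during unpacking, a bundle using the REMOTE_CONTAINER_HACK
  * defers a pe_check_active entry for its connection resource. When that
  * entry is processed here (after allocation, when final parameters are
  * known), a changed action definition combined with an effective fail count
  * on the node results in the fail count being scheduled for clearing.
  */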
 
 /*!
  * \internal
  * \brief Check whether a resource has failcount clearing scheduled on a node
  *
  * \param[in] node  Node to check
  * \param[in] rsc   Resource to check
  *
  * \return true if \p rsc has failcount clearing scheduled on \p node,
  *         otherwise false
  */
 static bool
 failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
 {
     GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
 
     if (list != NULL) {
         g_list_free(list);
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Ban a resource from a node if it reached its failure threshold there
  *
  * \param[in] rsc       Resource to check failure threshold for
  * \param[in] node      Node to check \p rsc on
  */
 static void
 check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
 {
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                        node);
         return;
 
     } else if (failcount_clear_action_exists(node, rsc)) {
         /* Don't force the resource away from this node due to a failcount
          * that's going to be cleared.
          *
          * @TODO Failcount clearing can be scheduled in
          * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
          * schedule_resource_actions() via check_params(). This runs well before
          * then, so it cannot detect those, meaning we might check the migration
          * threshold when we shouldn't. Worst case, we stop or move the
          * resource, then move it back in the next transition.
          */
         return;
 
     } else {
         pe_resource_t *failed = NULL;
 
         if (pcmk__threshold_reached(rsc, node, &failed)) {
             resource_location(failed, node, -INFINITY, "__fail_limit__",
                               rsc->cluster);
         }
     }
 }
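 
 /* As a rough sketch of the effect: if a primitive is configured with
  * migration-threshold=3 and its fail count on a node has reached 3,
  * pcmk__threshold_reached() reports the failed resource, and the
  * resource_location() call above assigns that node a -INFINITY weight for
  * the resource, banning it there until the fail count is cleared or expires.
  */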
 
 /*!
  * \internal
  * \brief If resource has exclusive discovery, ban node if not allowed
  *
  * Location constraints have a resource-discovery option that allows users to
  * specify where probes are done for the affected resource. If this is set to
  * exclusive, probes will only be done on nodes listed in exclusive constraints.
  * This function bans the resource from the node if the node is not listed.
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check \p rsc on
  */
 static void
 apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
 {
     if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
         pe_node_t *match = NULL;
 
         // If this is a collective resource, apply recursively to children
         g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);
 
         match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if ((match != NULL)
             && (match->rsc_discover_mode != pe_discover_exclusive)) {
             match->weight = -INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply stickiness to a resource if appropriate
  *
  * \param[in] rsc       Resource to check for stickiness
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
 
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
         return;
     }
 
     /* A resource is sticky if it is managed, has stickiness configured, and is
      * active on a single node.
      */
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
         || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
         return;
     }
 
     node = rsc->running_on->data;
 
     /* In a symmetric cluster, stickiness can always be used. In an
      * asymmetric cluster, we have to check whether the resource is still
      * allowed on the node, so we don't keep the resource somewhere it is no
      * longer explicitly enabled.
      */
     if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
         && (pe_hash_table_lookup(rsc->allowed_nodes,
                                  node->details->id) == NULL)) {
         pe_rsc_debug(rsc,
                      "Ignoring %s stickiness because the cluster is "
                      "asymmetric and node %s is not explicitly allowed",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
                  rsc->id, rsc->stickiness, node->details->uname);
     resource_location(rsc, node, rsc->stickiness, "stickiness",
                       rsc->cluster);
 }
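 
 /* For illustration (hypothetical IDs), stickiness might be configured as a
  * resource meta-attribute:
  *
  *     <primitive id="rsc1" ...>
  *       <meta_attributes id="rsc1-meta">
  *         <nvpair id="rsc1-stickiness" name="resource-stickiness"
  *                 value="100"/>
  *       </meta_attributes>
  *     </primitive>
  *
  * If rsc1 is managed and active on exactly one (allowed) node, the
  * resource_location() call above then adds a +100 preference for that node.
  */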
 
 /*!
  * \internal
  * \brief Apply shutdown locks for all resources as appropriate
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_shutdown_locks(pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         return;
     }
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->shutdown_lock(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Calculate the number of available nodes in the cluster
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 count_available_nodes(pe_working_set_t *data_set)
 {
     if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
         return;
     }
 
     // @COMPAT for API backward compatibility only (cluster does not use value)
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         if ((node != NULL) && (node->weight >= 0) && node->details->online
             && (node->details->type != node_ping)) {
             data_set->max_valid_nodes++;
         }
     }
     crm_trace("Online node count: %d", data_set->max_valid_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply node-specific scheduling criteria
  *
  * After the CIB has been unpacked, process node-specific scheduling criteria
  * including shutdown locks, location constraints, resource stickiness,
  * migration thresholds, and exclusive resource discovery.
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_node_criteria(pe_working_set_t *data_set)
 {
     crm_trace("Applying node-specific scheduling criteria");
     apply_shutdown_locks(data_set);
     count_available_nodes(data_set);
     pcmk__apply_locations(data_set);
     g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
 
     for (GList *node_iter = data_set->nodes; node_iter != NULL;
          node_iter = node_iter->next) {
         for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
              rsc_iter = rsc_iter->next) {
             pe_node_t *node = (pe_node_t *) node_iter->data;
             pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
 
             check_failure_threshold(rsc, node);
             apply_exclusive_discovery(rsc, node);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Allocate resources to nodes
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 allocate_resources(pe_working_set_t *data_set)
 {
     GList *iter = NULL;
 
     crm_trace("Allocating resources to nodes");
 
     if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
         pcmk__sort_resources(data_set);
     }
     pcmk__show_node_capacities("Original", data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Allocate remote connection resources first (which will also allocate
          * any colocation dependencies). If the connection is migrating, always
          * prefer the partial migration target.
          */
         for (iter = data_set->resources; iter != NULL; iter = iter->next) {
             pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
             if (rsc->is_remote_node) {
                 pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                              rsc->id);
                 rsc->cmds->allocate(rsc, rsc->partial_migration_target,
                                     data_set);
             }
         }
     }
 
     /* Now allocate the remaining (non-remote) resources */
     for (iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         if (!rsc->is_remote_node) {
             pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                          crm_element_name(rsc->xml), rsc->id);
             rsc->cmds->allocate(rsc, NULL, data_set);
         }
     }
 
     pcmk__show_node_capacities("Remaining", data_set);
 }
 
 /*!
  * \internal
  * \brief Schedule fail count clearing on online nodes if resource is orphaned
  *
  * \param[in] rsc       Resource to check
  * \param[in] data_set  Cluster working set
  */
 static void
 clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         return;
     }
     crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
 
     /* There's no need to recurse into rsc->children because those
      * should just be unallocated clone instances.
      */
 
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         pe_action_t *clear_op = NULL;
 
         if (!node->details->online) {
             continue;
         }
         if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                              data_set) == 0) {
             continue;
         }
 
         clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
 
         /* We can't use order_action_then_stop() here because its
          * pe_order_preserve breaks things
          */
         pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                            NULL, pe_order_optional, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule any resource actions needed
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 schedule_resource_actions(pe_working_set_t *data_set)
 {
     // Process deferred action checks
     pe__foreach_param_check(data_set, check_params);
     pe__free_param_checks(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Scheduling probes");
         pcmk__schedule_probes(data_set);
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
         g_list_foreach(data_set->resources,
                        (GFunc) clear_failcounts_if_orphaned, data_set);
     }
 
     crm_trace("Scheduling resource actions");
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any of its descendants are managed
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if resource or any descendant is managed, otherwise false
  */
 static bool
 is_managed(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return true;
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         if (is_managed((pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether any resources in the cluster are managed
  *
  * \param[in] data_set  Cluster working set
  *
  * \return true if any resource is managed, otherwise false
  */
 static bool
 any_managed_resources(pe_working_set_t *data_set)
 {
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         if (is_managed((pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires fencing
  *
  * \param[in] node          Node to check
  * \param[in] have_managed  Whether any resource in cluster is managed
  * \param[in] data_set      Cluster working set
  *
  * \return true if \p node should be fenced, otherwise false
  */
 static bool
 needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
 {
     return have_managed && node->details->unclean
            && pe_can_fence(data_set, node);
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires shutdown
  *
  * \param[in] node          Node to check
  *
  * \return true if \p node should be shut down, otherwise false
  */
 static bool
 needs_shutdown(pe_node_t *node)
 {
     if (pe__is_guest_or_remote_node(node)) {
         /* Do not send shutdown actions for Pacemaker Remote nodes.
          * @TODO We might come up with a good use for this in the future.
          */
         return false;
     }
     return node->details->online && node->details->shutdown;
 }
 
 /*!
  * \internal
  * \brief Track and order non-DC fencing
  *
  * \param[in] list      List of existing non-DC fencing actions
  * \param[in] action    Fencing action to prepend to \p list
  * \param[in] data_set  Cluster working set
  *
  * \return (Possibly new) head of \p list
  */
 static GList *
 add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
         && (list != NULL)) {
         /* Concurrent fencing is disabled, so order each non-DC
          * fencing in a chain. If there is any DC fencing or
          * shutdown, it will be ordered after the last action in the
          * chain later.
          */
         order_actions((pe_action_t *) list->data, action, pe_order_optional);
     }
     return g_list_prepend(list, action);
 }
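 
 /* For example (sketch): with concurrent fencing disabled, if node1, node2,
  * and node3 are processed in that order, the prepend-and-order logic above
  * produces the chain
  *
  *     fence node1 -> fence node2 -> fence node3
  *
  * with the last action in the chain (node3's fencing) at the head of the
  * returned list, ready for any DC fencing or shutdown to be ordered after it.
  */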
 
 /*!
  * \internal
  * \brief Schedule a node for fencing
  *
  * \param[in] node      Node that requires fencing
  * \param[in] data_set  Cluster working set
  *
  * \return Newly created fencing action
  */
 static pe_action_t *
 schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                        FALSE, data_set);
 
     pe_warn("Scheduling node %s for fencing", node->details->uname);
     pcmk__order_vs_fence(fencing, data_set);
     return fencing;
 }
 
 /*!
  * \internal
  * \brief Create and order node fencing and shutdown actions
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
 {
     pe_action_t *dc_down = NULL;
     bool integrity_lost = false;
     bool have_managed = any_managed_resources(data_set);
     GList *fencing_ops = NULL;
     GList *shutdown_ops = NULL;
 
     crm_trace("Scheduling fencing and shutdowns as needed");
     if (!have_managed) {
         crm_notice("No fencing will be done until there are resources to manage");
     }
 
     // Check each node for whether it needs fencing or shutdown
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         pe_action_t *fencing = NULL;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (pe__is_guest_node(node)) {
             if (node->details->remote_requires_reset && have_managed
                 && pe_can_fence(data_set, node)) {
-                pcmk__fence_guest(node, data_set);
+                pcmk__fence_guest(node);
             }
             continue;
         }
 
         if (needs_fencing(node, have_managed, data_set)) {
             fencing = schedule_fencing(node, data_set);
 
             // Track DC and non-DC fence actions separately
             if (node->details->is_dc) {
                 dc_down = fencing;
             } else {
                 fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
             }
 
         } else if (needs_shutdown(node)) {
-            pe_action_t *down_op = pcmk__new_shutdown_action(node, data_set);
+            pe_action_t *down_op = pcmk__new_shutdown_action(node);
 
             // Track DC and non-DC shutdown actions separately
             if (node->details->is_dc) {
                 dc_down = down_op;
             } else {
                 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
             }
         }
 
         if ((fencing == NULL) && node->details->unclean) {
             integrity_lost = true;
             pe_warn("Node %s is unclean but cannot be fenced",
                     node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pe_warn("Resource functionality and data integrity cannot be "
                     "guaranteed (configure, enable, and test fencing to "
                     "correct this)");
 
         } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
             crm_notice("Unclean nodes will not be fenced until quorum is "
                        "attained or no-quorum-policy is set to ignore");
         }
     }
 
     if (dc_down != NULL) {
         /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
          * DC elections. However, we don't want to order non-DC shutdowns before
          * a DC *fencing*, because even though we don't want a node that's
          * shutting down to become DC, the DC fencing could be ordered before a
          * clone stop that's also ordered before the shutdowns, thus leading to
          * a graph loop.
          */
         if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
             pcmk__order_after_each(dc_down, shutdown_ops);
         }
 
         // Order any non-DC fencing before any DC fencing or shutdown
 
         if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
             /* With concurrent fencing, order each non-DC fencing action
              * separately before any DC fencing or shutdown.
              */
             pcmk__order_after_each(dc_down, fencing_ops);
         } else if (fencing_ops != NULL) {
             /* Without concurrent fencing, the non-DC fencing actions are
              * already ordered relative to each other, so we just need to order
              * the DC fencing after the last action in the chain (which is the
              * first item in the list).
              */
             order_actions((pe_action_t *) fencing_ops->data, dc_down,
                           pe_order_optional);
         }
     }
     g_list_free(fencing_ops);
     g_list_free(shutdown_ops);
 }
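 
 /* Sketch of the resulting ordering (hypothetical nodes) when the DC is
  * shutting down rather than being fenced, with concurrent fencing disabled:
  *
  *     fence nodeA -> fence nodeB ---\
  *                                    --> shutdown DC
  *     shutdown nodeC ---------------/
  *
  * That is, the DC's own shutdown comes after all non-DC shutdowns and after
  * the last non-DC fencing action in the chain.
  */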
 
 static void
 log_resource_details(pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
     GList *all = NULL;
 
     /* We need a list of nodes that we are allowed to output information for.
      * This is necessary because out->message for all the resource-related
      * messages expects such a list, due to the `crm_mon --node=` feature.  Here,
      * we just make it a list of all the nodes.
      */
     all = g_list_prepend(all, (gpointer) "*");
 
     for (GList *item = data_set->resources; item != NULL; item = item->next) {
         pe_resource_t *rsc = (pe_resource_t *) item->data;
 
         // Log all resources except inactive orphans
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
             || (rsc->role != RSC_ROLE_STOPPED)) {
             out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
         }
     }
 
     g_list_free(all);
 }
 
 static void
 log_all_actions(pe_working_set_t *data_set)
 {
     /* This only ever outputs to the log, so ignore whatever output object was
      * previously set and just log instead.
      */
     pcmk__output_t *prev_out = data_set->priv;
     pcmk__output_t *out = NULL;
 
     if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
         return;
     }
 
     pe__register_messages(out);
     pcmk__register_lib_messages(out);
     pcmk__output_set_log_level(out, LOG_NOTICE);
     data_set->priv = out;
 
     out->begin_list(out, NULL, NULL, "Actions");
     pcmk__output_actions(data_set);
     out->end_list(out);
     out->finish(out, CRM_EX_OK, true, NULL);
     pcmk__output_free(out);
 
     data_set->priv = prev_out;
 }
 
 /*!
  * \internal
  * \brief Log all required but unrunnable actions at trace level
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 log_unrunnable_actions(pe_working_set_t *data_set)
 {
     const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
 
     crm_trace("Required but unrunnable actions:");
     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
         pe_action_t *action = (pe_action_t *) iter->data;
 
         if (!pcmk_any_flags_set(action->flags, flags)) {
             pcmk__log_action("\t", action, true);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Unpack the CIB for scheduling
  *
  * \param[in] cib       CIB XML to unpack (may be NULL if previously unpacked)
  * \param[in] flags     Working set flags to set in addition to defaults
  * \param[in] data_set  Cluster working set
  */
 static void
 unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
 {
     if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
         crm_trace("Reusing previously calculated cluster status");
         pe__set_working_set_flags(data_set, flags);
         return;
     }
 
     CRM_ASSERT(cib != NULL);
     crm_trace("Calculating cluster status");
 
     /* This will zero the entire struct without freeing anything first, so
      * callers should never call pcmk__schedule_actions() with a populated data
      * set unless pe_flag_have_status is set (i.e. cluster_status() was
      * previously called, whether directly or via pcmk__schedule_actions()).
      */
     set_working_set_defaults(data_set);
 
     pe__set_working_set_flags(data_set, flags);
     data_set->input = cib;
     cluster_status(data_set); // Sets pe_flag_have_status
 }
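 
 /* A minimal usage sketch (hypothetical caller):
  *
  *     pe_working_set_t *data_set = pe_new_working_set();
  *
  *     pcmk__schedule_actions(cib_xml, 0, data_set); // unpacks cib_xml
  *     pcmk__schedule_actions(NULL, 0, data_set);    // reuses cluster status
  *
  * The second call is valid only because the first one left
  * pe_flag_have_status set; otherwise, cib must be non-NULL.
  */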
 
 /*!
  * \internal
  * \brief Run the scheduler for a given CIB
  *
  * \param[in]     cib       CIB XML to use as scheduler input
  * \param[in]     flags     Working set flags to set in addition to defaults
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                        pe_working_set_t *data_set)
 {
     unpack_cib(cib, flags, data_set);
     pcmk__set_allocation_methods(data_set);
     pcmk__apply_node_health(data_set);
     pcmk__unpack_constraints(data_set);
     if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
         return;
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
          pcmk__is_daemon) {
         log_resource_details(data_set);
     }
 
     apply_node_criteria(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
         return;
     }
 
     pcmk__create_internal_constraints(data_set);
     pcmk__handle_rsc_config_changes(data_set);
     allocate_resources(data_set);
     schedule_resource_actions(data_set);
 
     /* Remote ordering constraints need to happen prior to calculating fencing
      * because it is one more place we can mark nodes as needing fencing.
      */
     pcmk__order_remote_connection_actions(data_set);
 
     schedule_fencing_and_shutdowns(data_set);
     pcmk__apply_orderings(data_set);
     log_all_actions(data_set);
     pcmk__create_graph(data_set);
 
     if (get_crm_log_level() == LOG_TRACE) {
         log_unrunnable_actions(data_set);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index f6a483aad0..f1e3022a3c 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1129 +1,1128 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define PE__VARIANT_BUNDLE 1
 #include <lib/pengine/variant.h>
 
 static bool
 is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
 {
     for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (node->details == replica->node->details) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
-gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
 void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                          int max, int per_host_max, pe_working_set_t *data_set);
 
 static GList *
 get_container_list(pe_resource_t *rsc)
 {
     GList *containers = NULL;
 
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             containers = g_list_append(containers, replica->container);
         }
     }
     return containers;
 }
 
 static inline GList *
 get_containers_or_children(pe_resource_t *rsc)
 {
     return (rsc->variant == pe_container)?
            get_container_list(rsc) : rsc->children;
 }
 
 pe_node_t *
 pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                       pe_working_set_t *data_set)
 {
     GList *containers = NULL;
     GList *nodes = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     containers = get_container_list(rsc);
 
     pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, data_set);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL, data_set);
-    containers = g_list_sort_with_data(containers, sort_clone_instance, data_set);
+    containers = g_list_sort(containers, pcmk__cmp_instance);
     distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
                         bundle_data->nreplicas_per_host, data_set);
     g_list_free(nodes);
     g_list_free(containers);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         pe_node_t *container_host = NULL;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
                          rsc->id, replica->ip->id);
             replica->ip->cmds->allocate(replica->ip, prefer, data_set);
         }
 
         container_host = replica->container->allocated_to;
         if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
             /* We need 'nested' connection resources to be on the same
              * host because pacemaker-remoted only supports a single
              * active connection
              */
             pcmk__new_colocation("child-remote-with-docker-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, true, data_set);
         }
 
         if (replica->remote) {
             pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
                          rsc->id, replica->remote->id);
             replica->remote->cmds->allocate(replica->remote, prefer,
                                             data_set);
         }
 
         // Explicitly allocate replicas' children before bundle child
         if (replica->child) {
             pe_node_t *node = NULL;
             GHashTableIter iter;
 
             g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 if (node->details != replica->node->details) {
                     node->weight = -INFINITY;
                 } else if (!pcmk__threshold_reached(replica->child, node,
                                                     NULL)) {
                     node->weight = INFINITY;
                 }
             }
 
             pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
             pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
                          rsc->id, replica->child->id);
             replica->child->cmds->allocate(replica->child, replica->node,
                                            data_set);
             pe__clear_resource_flags(replica->child->parent,
                                        pe_rsc_allocating);
         }
     }
 
     if (bundle_data->child) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (is_bundle_node(bundle_data, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         pe_rsc_trace(rsc, "Allocating bundle %s child %s",
                      rsc->id, bundle_data->child->id);
         bundle_data->child->cmds->allocate(bundle_data->child, prefer, data_set);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
 
 
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
     GList *containers = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     containers = get_container_list(rsc);
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             replica->ip->cmds->create_actions(replica->ip, data_set);
         }
         if (replica->container) {
             replica->container->cmds->create_actions(replica->container,
                                                      data_set);
         }
         if (replica->remote) {
             replica->remote->cmds->create_actions(replica->remote, data_set);
         }
     }
 
     clone_create_pseudo_actions(rsc, containers, NULL, NULL,  data_set);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->create_actions(bundle_data->child, data_set);
 
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             /* promote */
             pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
             action = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
             action->priority = INFINITY;
 
             /* demote */
             pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
             action = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
             action->priority = INFINITY;
         }
     }
 
     g_list_free(containers);
 }
 
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc,
                                   pe_working_set_t *data_set)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
                                      RSC_START, pe_order_implies_first_printed,
                                      data_set);
         pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
                                      RSC_STOP, pe_order_implies_first_printed,
                                      data_set);
 
         if (bundle_data->child->children) {
             pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed,
                                          data_set);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed,
                                          data_set);
         } else {
             pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed,
                                          data_set);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed,
                                          data_set);
         }
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         CRM_ASSERT(replica->container);
 
         replica->container->cmds->internal_constraints(replica->container,
                                                        data_set);
 
         pcmk__order_starts(rsc, replica->container,
                            pe_order_runnable_left|pe_order_implies_first_printed,
                            data_set);
 
         if (replica->child) {
             pcmk__order_stops(rsc, replica->child,
                               pe_order_implies_first_printed, data_set);
         }
         pcmk__order_stops(rsc, replica->container,
                           pe_order_implies_first_printed, data_set);
         pcmk__order_resource_actions(replica->container, RSC_START, rsc,
                                      RSC_STARTED, pe_order_implies_then_printed,
                                      data_set);
         pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
                                      RSC_STOPPED, pe_order_implies_then_printed,
                                      data_set);
 
         if (replica->ip) {
             replica->ip->cmds->internal_constraints(replica->ip, data_set);
 
             // Start IP then container
             pcmk__order_starts(replica->ip, replica->container,
                                pe_order_runnable_left|pe_order_preserve,
                                data_set);
             pcmk__order_stops(replica->container, replica->ip,
                               pe_order_implies_first|pe_order_preserve,
                               data_set);
 
             pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
                                  replica->container, NULL, NULL, true,
                                  data_set);
         }
 
         if (replica->remote) {
             /* This handles ordering and colocating remote relative to container
              * (via "resource-with-container"). Since IP is also ordered and
              * colocated relative to the container, we don't need to do anything
              * explicit here with IP.
              */
             replica->remote->cmds->internal_constraints(replica->remote,
                                                         data_set);
         }
 
         if (replica->child) {
             CRM_ASSERT(replica->remote);
 
             // "Start remote then child" is implicit in scheduler's remote logic
         }
 
     }
 
     if (bundle_data->child) {
         bundle_data->child->cmds->internal_constraints(bundle_data->child, data_set);
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             promote_demote_constraints(rsc, data_set);
 
             /* child demoted before global demoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
                                          RSC_DEMOTED,
                                          pe_order_implies_then_printed,
                                          data_set);
 
             /* global demote before child demote */
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
                                          RSC_DEMOTE,
                                          pe_order_implies_first_printed,
                                          data_set);
 
             /* child promoted before global promoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
                                          RSC_PROMOTED,
                                          pe_order_implies_then_printed,
                                          data_set);
 
             /* global promote before child promote */
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed,
                                          data_set);
         }
     }
 }
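 
 /* Conceptually, the promotable orderings above chain the bundle's
  * pseudo-actions around the child's:
  *
  *     bundle promote -> child promote ... child promoted -> bundle promoted
  *
  * and symmetrically for demotion (bundle demote -> child demote ...
  * child demoted -> bundle demoted).
  */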
 
 static pe_resource_t *
 compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate,
                             pe_resource_t *rsc, enum rsc_role_e filter,
                             gboolean current)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(candidate != NULL, return NULL);
     get_bundle_variant_data(bundle_data, rsc);
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               rsc_lh->id, rsc->id, candidate->details->uname);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (is_child_compatible(replica->container, candidate, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       rsc_lh->id, replica->container->id,
                       candidate->details->uname);
             return replica->container;
         }
     }
 
     crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
     return NULL;
 }
 
 static pe_resource_t *
 compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc,
                    enum rsc_role_e filter, gboolean current,
                    pe_working_set_t *data_set)
 {
     GList *scratch = NULL;
     pe_resource_t *pair = NULL;
     pe_node_t *active_node_lh = NULL;
 
     active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
     if (active_node_lh) {
         return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
                                            current);
     }
 
     scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL, data_set);
 
     for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
   done:
     g_list_free(scratch);
     return pair;
 }
 
 void
 pcmk__bundle_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
                                pcmk__colocation_t *constraint,
                                pe_working_set_t *data_set)
 {
     /* -- Never called --
      *
      * Instead, we add the colocation constraints to the child and call
      * from there
      */
     CRM_ASSERT(FALSE);
 }
 
 int
 copies_per_node(pe_resource_t *rsc)
 {
     /* Strictly speaking, there should be a 'copies_per_node' addition to the
      * resource function table, and each case would be a function. However,
      * that would be serious overkill just to return an int. In fact, it
      * seems that both function tables could/should be replaced by
      * resources.{c,h} full of rsc_{some_operation} functions containing a
      * switch as below, which call out to functions named
      * {variant}_{some_operation} as needed.
      */
     switch (rsc->variant) {
         case pe_unknown:
             return 0;
         case pe_native:
         case pe_group:
             return 1;
         case pe_clone:
             {
                 const char *max_clones_node =
                     g_hash_table_lookup(rsc->meta,
                                         XML_RSC_ATTR_INCARNATION_NODEMAX);
 
                 if (max_clones_node == NULL) {
                     return 1;
 
                 } else {
                     int max_i;
 
                     pcmk__scan_min_int(max_clones_node, &max_i, 0);
                     return max_i;
                 }
             }
         case pe_container:
             {
                 pe__bundle_variant_data_t *data = NULL;
                 get_bundle_variant_data(data, rsc);
                 return data->nreplicas_per_host;
             }
     }
     return 0;
 }
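 
 /* For example: a clone configured with clone-node-max=2 (the
  * XML_RSC_ATTR_INCARNATION_NODEMAX meta-attribute) yields 2; a primitive or
  * group yields 1; a bundle yields its configured replicas-per-host.
  */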
 
 void
 pcmk__bundle_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
                                pcmk__colocation_t *constraint,
                                pe_working_set_t *data_set)
 {
     GList *allocated_primaries = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(constraint != NULL, return);
     CRM_CHECK(dependent != NULL,
               pe_err("dependent was NULL for %s", constraint->id); return);
     CRM_CHECK(primary != NULL,
               pe_err("primary was NULL for %s", constraint->id); return);
     CRM_ASSERT(dependent->variant == pe_native);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (constraint->dependent->variant > pe_group) {
         pe_resource_t *primary_replica = compatible_replica(dependent, primary,
                                                             RSC_ROLE_UNKNOWN,
                                                             FALSE, data_set);
 
         if (primary_replica) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_replica->id);
             dependent->cmds->rsc_colocation_lh(dependent, primary_replica,
                                                constraint, data_set);
 
         } else if (constraint->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
     }
 
     get_bundle_variant_data(bundle_data, primary);
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  constraint->id, dependent->id, primary->id, constraint->score);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (constraint->score < INFINITY) {
             replica->container->cmds->rsc_colocation_rh(dependent,
                                                         replica->container,
                                                         constraint, data_set);
 
         } else {
             pe_node_t *chosen = replica->container->fns->location(replica->container,
                                                                   NULL, FALSE);
 
             if ((chosen == NULL)
                 || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
                 continue;
             }
             if ((constraint->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child == NULL)) {
                 continue;
             }
             if ((constraint->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child->next_role < RSC_ROLE_PROMOTED)) {
                 continue;
             }
 
             pe_rsc_trace(primary, "Allowing %s: %s %d",
                          constraint->id, chosen->details->uname,
                          chosen->weight);
             allocated_primaries = g_list_prepend(allocated_primaries, chosen);
         }
     }
 
     if (constraint->score >= INFINITY) {
         node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
     }
     g_list_free(allocated_primaries);
 }
 
 enum pe_action_flags
 pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node)
 {
     GList *containers = NULL;
     enum pe_action_flags flags = 0;
     pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, action->rsc);
     if (data->child) {
         enum action_tasks task = get_complex_task(data->child, action->task,
                                                   TRUE);
         switch (task) {
             case no_action:
             case action_notify:
             case action_notified:
             case action_promote:
             case action_promoted:
             case action_demote:
             case action_demoted:
                 return summary_action_flags(action, data->child->children, node);
             default:
                 break;
         }
     }
 
     containers = get_container_list(action->rsc);
     flags = summary_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
 pe_resource_t *
 find_compatible_child_by_node(pe_resource_t *local_child,
                               pe_node_t *local_node, pe_resource_t *rsc,
                               enum rsc_role_e filter, gboolean current)
 {
     GList *gIter = NULL;
     GList *children = NULL;
 
     if (local_node == NULL) {
         crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
         return NULL;
     }
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               local_child->id, rsc->id, local_node->details->uname);
 
     children = get_containers_or_children(rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (is_child_compatible(child_rsc, local_node, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       local_child->id, child_rsc->id, local_node->details->uname);
             return child_rsc;
         }
     }
 
     crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
     if (children != rsc->children) {
         g_list_free(children);
     }
     return NULL;
 }
 
 static pe__bundle_replica_t *
 replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
                       pe_node_t *node)
 {
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->child
                 && (container == replica->container)
                 && (node->details == replica->node->details)) {
                 return replica;
             }
         }
     }
     return NULL;
 }
 
 static enum pe_graph_flags
 multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
                                 pe_node_t *node, enum pe_action_flags flags,
                                 enum pe_action_flags filter,
                                 enum pe_ordering type,
                                 pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     GList *children = NULL;
     gboolean current = FALSE;
     enum pe_graph_flags changed = pe_graph_none;
 
     /* @TODO Fix this - checking UUID suffixes is a lazy way of detecting
      * orderings relative to completed (stopped/demoted) actions
      */
     if (pcmk__ends_with(first->uuid, "_stopped_0")
         || pcmk__ends_with(first->uuid, "_demoted_0")) {
         current = TRUE;
     }
 
     children = get_containers_or_children(then->rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *then_child = gIter->data;
         pe_resource_t *first_child = find_compatible_child(then_child,
                                                            first->rsc,
                                                            RSC_ROLE_UNKNOWN,
                                                            current, data_set);
         if (first_child == NULL && current) {
             crm_trace("Ignore");
 
         } else if (first_child == NULL) {
             crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);
 
             /* This is an ugly hack, but what else can we do?
              *
              * If nothing is active or about to be active on the same node
              * as then_child, then it must not be allowed to start.
              */
             if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
                 pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                 if (pcmk__assign_resource(then_child, NULL, true)) {
                     pe__set_graph_flags(changed, first, pe_graph_updated_then);
                 }
             }
 
         } else {
             pe_action_t *first_action = NULL;
             pe_action_t *then_action = NULL;
 
             enum action_tasks task = clone_child_action(first);
             const char *first_task = task2text(task);
 
             pe__bundle_replica_t *first_replica = NULL;
             pe__bundle_replica_t *then_replica = NULL;
 
             first_replica = replica_for_container(first->rsc, first_child,
                                                   node);
             if (strstr(first->task, "stop") && first_replica
                 && first_replica->child) {
                 /* Except for 'stopped', we should be looking at the
                  * in-container resource; actions for the child will
                  * happen later and are therefore more likely to align
                  * with the user's intent.
                  */
                 first_action = find_first_action(first_replica->child->actions,
                                                  NULL, task2text(task), node);
             } else {
                 first_action = find_first_action(first_child->actions, NULL,
                                                  task2text(task), node);
             }
 
             then_replica = replica_for_container(then->rsc, then_child, node);
             if (strstr(then->task, "mote")
                 && then_replica && then_replica->child) {
                 /* Promote/demote actions will never be found for the
                  * container resource, look in the child instead
                  *
                  * Alternatively treat:
                  *  'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
                  *  'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
                  */
                 then_action = find_first_action(then_replica->child->actions,
                                                 NULL, then->task, node);
             } else {
                 then_action = find_first_action(then_child->actions, NULL, then->task, node);
             }
 
             if (first_action == NULL) {
                 if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (first)",
                             first_task, first_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (first)",
                               first_task, first_child->id,
                               pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             /* We're only interested if 'then' is neither stopping nor being
              * demoted
              */
             if (then_action == NULL) {
                 if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (then)",
                             then->task, then_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (then)",
                               then->task, then_child->id,
                               pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             if (order_actions(first_action, then_action, type)) {
                 crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
                           first_action->uuid,
                           pcmk_is_set(first_action->flags, pe_action_optional),
                           then_action->uuid,
                           pcmk_is_set(then_action->flags, pe_action_optional),
                           type);
                 pe__set_graph_flags(changed, first,
                                     pe_graph_updated_first|pe_graph_updated_then);
             }
             if (first_action && then_action) {
                 changed |= then_child->cmds->update_actions(first_action,
                     then_action, node,
                     first_child->cmds->action_flags(first_action, node),
                     filter, type, data_set);
             } else {
                 crm_err("Nothing found either for %s (%p) or %s (%p) %s",
                         first_child->id, first_action,
                         then_child->id, then_action, task2text(task));
             }
         }
     }
 
     if(children != then->rsc->children) {
         g_list_free(children);
     }
     return changed;
 }
 
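 /* Check whether two actions on different clones or bundles should be
  * interleaved, i.e. ordered instance-by-instance rather than collectively,
  * based on the relevant resource's "interleave" meta-attribute
  */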
 static bool
 can_interleave_actions(pe_action_t *first, pe_action_t *then)
 {
     bool interleave = FALSE;
     pe_resource_t *rsc = NULL;
     const char *interleave_s = NULL;
 
     if(first->rsc == NULL || then->rsc == NULL) {
         crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
         crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid);
         return FALSE;
     }
 
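     /* If 'then' is a stop or demote, interleaving is determined by the
      * 'first' resource's interleave setting; otherwise by 'then''s
      */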
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
     interleave = crm_is_true(interleave_s);
     crm_trace("Interleave %s -> %s: %s (based on %s)",
               first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
 
     return interleave;
 }
 
 enum pe_graph_flags
 pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
                            pe_node_t *node, enum pe_action_flags flags,
                            enum pe_action_flags filter, enum pe_ordering type,
                            pe_working_set_t *data_set)
 {
     enum pe_graph_flags changed = pe_graph_none;
 
     crm_trace("%s -> %s", first->uuid, then->uuid);
 
     if(can_interleave_actions(first, then)) {
         changed = multi_update_interleave_actions(first, then, node, flags,
                                                   filter, type, data_set);
 
     } else if(then->rsc) {
         GList *gIter = NULL;
         GList *children = NULL;
 
         // Handle the 'primitive' ordering case
         changed |= native_update_actions(first, then, node, flags, filter,
                                          type, data_set);
 
         // Now any children (or containers in the case of a bundle)
         children = get_containers_or_children(then->rsc);
         for (gIter = children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *then_child = (pe_resource_t *) gIter->data;
             enum pe_graph_flags then_child_changed = pe_graph_none;
             pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
 
             if (then_child_action) {
                 enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node);
 
                 if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
                     then_child_changed |= then_child->cmds->update_actions(first,
                         then_child_action, node, flags, filter, type, data_set);
                 }
                 changed |= then_child_changed;
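                 /* If the child's 'then' action changed, any actions ordered
                  * after it must be re-evaluated as well
                  */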
                 if (then_child_changed & pe_graph_updated_then) {
                     for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
                         pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
 
                         pcmk__update_action_for_orderings(next->action,
                                                           data_set);
                     }
                 }
             }
         }
 
         if(children != then->rsc->children) {
             g_list_free(children);
         }
     }
     return changed;
 }
 
 void
 pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     get_bundle_variant_data(bundle_data, rsc);
 
     pcmk__apply_location(constraint, rsc);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->container) {
             replica->container->cmds->rsc_location(replica->container,
                                                    constraint);
         }
         if (replica->ip) {
             replica->ip->cmds->rsc_location(replica->ip, constraint);
         }
     }
 
     if (bundle_data->child
         && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
             || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
         bundle_data->child->cmds->rsc_location(bundle_data->child, constraint);
         bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
                                                           constraint);
     }
 }
 
 void
 pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->expand(bundle_data->child, data_set);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->remote && replica->container
             && pe__bundle_needs_remote_name(replica->remote, data_set)) {
 
             /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
              * run pacemaker-remoted inside, without needing a separate IP for
              * the container. This is done by configuring the inner remote's
              * connection host as the magic string "#uname", then
              * replacing it with the underlying host when needed.
              */
             xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
                                                replica->remote->xml, LOG_ERR);
             const char *calculated_addr = NULL;
 
             // Replace the value in replica->remote->xml (if appropriate)
             calculated_addr = pe__add_bundle_remote_name(replica->remote,
                                                          data_set,
                                                          nvpair, "value");
             if (calculated_addr) {
                 /* Since this is for the bundle as a resource, and not any
                  * particular action, replace the value in the default
                  * parameters (not evaluated for node). create_graph_action()
                  * will grab it from there to replace it in node-evaluated
                  * parameters.
                  */
                 GHashTable *params = pe_rsc_params(replica->remote,
                                                    NULL, data_set);
 
                 crm_trace("Set address for bundle connection %s to bundle host %s",
                           replica->remote->id, calculated_addr);
                 g_hash_table_replace(params,
                                      strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                      strdup(calculated_addr));
             } else {
                 /* The only way to get here is if the remote connection is
                  * neither currently running nor scheduled to run. That means we
                  * won't be doing any operations that require addr (only start
                  * requires it; we additionally use it to compare digests when
                  * unpacking status, promote, and migrate_from history, but
                  * that's already happened by this point).
                  */
                 crm_info("Unable to determine address for bundle %s remote connection",
                          rsc->id);
             }
         }
         if (replica->ip) {
             replica->ip->cmds->expand(replica->ip, data_set);
         }
         if (replica->container) {
             replica->container->cmds->expand(replica->container, data_set);
         }
         if (replica->remote) {
             replica->remote->cmds->expand(replica->remote, data_set);
         }
     }
 }
 
 gboolean
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node,
                           pe_action_t *complete, gboolean force,
                           pe_working_set_t * data_set)
 {
     bool any_created = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return FALSE);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             any_created |= replica->ip->cmds->create_probe(replica->ip, node,
                                                            complete, force,
                                                            data_set);
         }
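         /* The bundle's child resource is probed only on the node to which
          * this replica is assigned
          */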
         if (replica->child && (node->details == replica->node->details)) {
             any_created |= replica->child->cmds->create_probe(replica->child,
                                                               node, complete,
                                                               force, data_set);
         }
         if (replica->container) {
             bool created = replica->container->cmds->create_probe(replica->container,
                                                                   node, complete,
                                                                   force, data_set);
 
             if(created) {
                 any_created = TRUE;
                 /* If we're limited to one replica per host (probably due to
                  * the lack of an IP range), then we don't want any of our
                  * peer containers starting until we've established that no
                  * other copies are already running.
                  *
                  * This is partly to ensure that nreplicas_per_host is
                  * observed, but also to ensure that the containers don't
                  * fail to start because the necessary port mappings (which
                  * won't include an IP for uniqueness) are already taken.
                  */
 
                 for (GList *tIter = bundle_data->replicas;
                      tIter && (bundle_data->nreplicas_per_host == 1);
                      tIter = tIter->next) {
                     pe__bundle_replica_t *other = tIter->data;
 
                     if ((other != replica) && (other != NULL)
                         && (other->container != NULL)) {
 
                         pcmk__new_ordering(replica->container,
                                            pcmk__op_key(replica->container->id, RSC_STATUS, 0),
                                            NULL, other->container,
                                            pcmk__op_key(other->container->id, RSC_START, 0),
                                            NULL,
                                            pe_order_optional|pe_order_same_node,
                                            data_set);
                     }
                 }
             }
         }
         if (replica->container && replica->remote
             && replica->remote->cmds->create_probe(replica->remote, node,
                                                    complete, force,
                                                    data_set)) {
 
             /* Do not probe the remote resource until we know where the
              * container is running. This is required for REMOTE_CONTAINER_HACK
              * to correctly probe remote resources.
              */
             char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
                                             0);
             pe_action_t *probe = find_first_action(replica->remote->actions,
                                                    probe_uuid, NULL, node);
 
             free(probe_uuid);
             if (probe) {
                 any_created = TRUE;
                 crm_trace("Ordering %s probe on %s",
                           replica->remote->id, node->details->uname);
                 pcmk__new_ordering(replica->container,
                                    pcmk__op_key(replica->container->id, RSC_START, 0),
                                    NULL, replica->remote, NULL, probe,
                                    pe_order_probe, data_set);
             }
         }
     }
     return any_created;
 }
 
 void
 pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
 {
 }
 
 void
 pcmk__output_bundle_actions(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip != NULL) {
             replica->ip->cmds->output_actions(replica->ip);
         }
         if (replica->container != NULL) {
             replica->container->cmds->output_actions(replica->container);
         }
         if (replica->remote != NULL) {
             replica->remote->cmds->output_actions(replica->remote);
         }
         if (replica->child != NULL) {
             replica->child->cmds->output_actions(replica->child);
         }
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                              GList *all_rscs, GHashTable *utilization)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
     if (bundle_data->replicas == NULL) {
         return;
     }
 
     /* All bundle replicas are identical, so using the utilization of the first
      * is sufficient for any. Only the implicit container resource can have
      * utilization values.
      */
     replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
     if (replica->container != NULL) {
         replica->container->cmds->add_utilization(replica->container, orig_rsc,
                                                   all_rscs, utilization);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Bundles currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index a621c5ba59..d0bd47f9e8 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,1569 +1,1172 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_CLONE 1
 #include <lib/pengine/variant.h>
 
-gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
 static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all);
 
-static gint
-sort_rsc_id(gconstpointer a, gconstpointer b)
-{
-    const pe_resource_t *resource1 = (const pe_resource_t *)a;
-    const pe_resource_t *resource2 = (const pe_resource_t *)b;
-    long num1, num2;
-
-    CRM_ASSERT(resource1 != NULL);
-    CRM_ASSERT(resource2 != NULL);
-
-    /*
-     * Sort clone instances numerically by instance number, so instance :10
-     * comes after :9.
-     */
-    num1 = strtol(strrchr(resource1->id, ':') + 1, NULL, 10);
-    num2 = strtol(strrchr(resource2->id, ':') + 1, NULL, 10);
-    if (num1 < num2) {
-        return -1;
-    } else if (num1 > num2) {
-        return 1;
-    }
-    return 0;
-}
-
-static pe_node_t *
-parent_node_instance(const pe_resource_t * rsc, pe_node_t * node)
-{
-    pe_node_t *ret = NULL;
-
-    if (node != NULL && rsc->parent) {
-        ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id);
-    } else if(node != NULL) {
-        ret = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
-    }
-    return ret;
-}
-
-static gboolean
-did_fail(const pe_resource_t * rsc)
-{
-    GList *gIter = rsc->children;
-
-    if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
-        return TRUE;
-    }
-
-    for (; gIter != NULL; gIter = gIter->next) {
-        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-
-        if (did_fail(child_rsc)) {
-            return TRUE;
-        }
-    }
-    return FALSE;
-}
-
-/*!
- * \internal
- * \brief Compare instances based on colocation scores.
- *
- * Determines the relative order in which \c rsc1 and \c rsc2 should be
- * allocated. If one resource compares less than the other, then it
- * should be allocated first.
- *
- * \param[in] rsc1  The first instance to compare.
- * \param[in] rsc2  The second instance to compare.
- * \param[in] data_set  Cluster working set.
- *
- * \return -1 if `rsc1 < rsc2`,
- *          0 if `rsc1 == rsc2`, or
- *          1 if `rsc1 > rsc2`
- */
-static int
-order_instance_by_colocation(const pe_resource_t *rsc1,
-                             const pe_resource_t *rsc2,
-                             pe_working_set_t *data_set)
-{
-    int rc = 0;
-    pe_node_t *n = NULL;
-    pe_node_t *node1 = NULL;
-    pe_node_t *node2 = NULL;
-    pe_node_t *current_node1 = pe__current_node(rsc1);
-    pe_node_t *current_node2 = pe__current_node(rsc2);
-    GList *list1 = NULL;
-    GList *list2 = NULL;
-    GHashTable *hash1 = pcmk__strkey_table(NULL, free);
-    GHashTable *hash2 = pcmk__strkey_table(NULL, free);
-
-    /* Clone instances must have parents */
-    CRM_ASSERT(rsc1->parent != NULL);
-    CRM_ASSERT(rsc2->parent != NULL);
-
-    n = pe__copy_node(current_node1);
-    g_hash_table_insert(hash1, (gpointer) n->details->id, n);
-
-    n = pe__copy_node(current_node2);
-    g_hash_table_insert(hash2, (gpointer) n->details->id, n);
-
-    /* Apply rsc1's parental colocations */
-    for (GList *gIter = rsc1->parent->rsc_cons; gIter != NULL;
-         gIter = gIter->next) {
-
-        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
-
-        crm_trace("Applying %s to %s", constraint->id, rsc1->id);
-
-        hash1 = pcmk__native_merge_weights(constraint->primary, rsc1->id, hash1,
-                                           constraint->node_attribute,
-                                           constraint->score / (float) INFINITY,
-                                           0);
-    }
-
-    for (GList *gIter = rsc1->parent->rsc_cons_lhs; gIter != NULL;
-         gIter = gIter->next) {
-
-        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
-
-        if (!pcmk__colocation_has_influence(constraint, rsc1)) {
-            continue;
-        }
-        crm_trace("Applying %s to %s", constraint->id, rsc1->id);
-
-        hash1 = pcmk__native_merge_weights(constraint->dependent, rsc1->id,
-                                           hash1, constraint->node_attribute,
-                                           constraint->score / (float) INFINITY,
-                                           pe_weights_positive);
-    }
-
-    /* Apply rsc2's parental colocations */
-    for (GList *gIter = rsc2->parent->rsc_cons; gIter != NULL;
-         gIter = gIter->next) {
-
-        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
-
-        crm_trace("Applying %s to %s", constraint->id, rsc2->id);
-
-        hash2 = pcmk__native_merge_weights(constraint->primary, rsc2->id, hash2,
-                                           constraint->node_attribute,
-                                           constraint->score / (float) INFINITY,
-                                           0);
-    }
-
-    for (GList *gIter = rsc2->parent->rsc_cons_lhs; gIter;
-         gIter = gIter->next) {
-
-        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
-
-        if (!pcmk__colocation_has_influence(constraint, rsc2)) {
-            continue;
-        }
-        crm_trace("Applying %s to %s", constraint->id, rsc2->id);
-
-        hash2 = pcmk__native_merge_weights(constraint->dependent, rsc2->id,
-                                           hash2, constraint->node_attribute,
-                                           constraint->score / (float) INFINITY,
-                                           pe_weights_positive);
-    }
-
-    /* Current location score */
-    node1 = g_hash_table_lookup(hash1, current_node1->details->id);
-    node2 = g_hash_table_lookup(hash2, current_node2->details->id);
-
-    if (node1->weight < node2->weight) {
-        if (node1->weight < 0) {
-            crm_trace("%s > %s: current score: %d %d",
-                      rsc1->id, rsc2->id, node1->weight, node2->weight);
-            rc = -1;
-            goto out;
-
-        } else {
-            crm_trace("%s < %s: current score: %d %d",
-                      rsc1->id, rsc2->id, node1->weight, node2->weight);
-            rc = 1;
-            goto out;
-        }
-
-    } else if (node1->weight > node2->weight) {
-        crm_trace("%s > %s: current score: %d %d",
-                  rsc1->id, rsc2->id, node1->weight, node2->weight);
-        rc = -1;
-        goto out;
-    }
-
-    /* All location scores */
-    list1 = g_hash_table_get_values(hash1);
-    list2 = g_hash_table_get_values(hash2);
-
-    list1 = pcmk__sort_nodes(list1, current_node1, data_set);
-    list2 = pcmk__sort_nodes(list2, current_node2, data_set);
-
-    for (GList *gIter1 = list1, *gIter2 = list2;
-         (gIter1 != NULL) && (gIter2 != NULL);
-         gIter1 = gIter1->next, gIter2 = gIter2->next) {
-
-        node1 = (pe_node_t *) gIter1->data;
-        node2 = (pe_node_t *) gIter2->data;
-
-        if (node1 == NULL) {
-            crm_trace("%s < %s: colocated score NULL", rsc1->id, rsc2->id);
-            rc = 1;
-            break;
-
-        } else if (node2 == NULL) {
-            crm_trace("%s > %s: colocated score NULL", rsc1->id, rsc2->id);
-            rc = -1;
-            break;
-        }
-
-        if (node1->weight < node2->weight) {
-            crm_trace("%s < %s: colocated score", rsc1->id, rsc2->id);
-            rc = 1;
-            break;
-
-        } else if (node1->weight > node2->weight) {
-            crm_trace("%s > %s: colocated score", rsc1->id, rsc2->id);
-            rc = -1;
-            break;
-        }
-    }
-
-out:
-    g_hash_table_destroy(hash1);
-    g_hash_table_destroy(hash2);
-    g_list_free(list1);
-    g_list_free(list2);
-
-    return rc;
-}
-
-gint
-sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
-{
-    int rc = 0;
-    pe_node_t *node1 = NULL;
-    pe_node_t *node2 = NULL;
-    pe_node_t *current_node1 = NULL;
-    pe_node_t *current_node2 = NULL;
-    unsigned int nnodes1 = 0;
-    unsigned int nnodes2 = 0;
-
-    gboolean can1 = TRUE;
-    gboolean can2 = TRUE;
-
-    const pe_resource_t *resource1 = (const pe_resource_t *)a;
-    const pe_resource_t *resource2 = (const pe_resource_t *)b;
-
-    CRM_ASSERT(resource1 != NULL);
-    CRM_ASSERT(resource2 != NULL);
-
-    /* allocation order:
-     *  - active instances
-     *  - instances running on nodes with the least copies
-     *  - active instances on nodes that can't support them or are to be fenced
-     *  - failed instances
-     *  - inactive instances
-     */
-
-    current_node1 = pe__find_active_on(resource1, &nnodes1, NULL);
-    current_node2 = pe__find_active_on(resource2, &nnodes2, NULL);
-
-    /* If both instances are running and at least one is multiply
-     * active, give precedence to the one that's running on fewer nodes.
-     */
-    if ((nnodes1 > 0) && (nnodes2 > 0)) {
-        if (nnodes1 < nnodes2) {
-            crm_trace("%s < %s: running_on", resource1->id, resource2->id);
-            return -1;
-
-        } else if (nnodes1 > nnodes2) {
-            crm_trace("%s > %s: running_on", resource1->id, resource2->id);
-            return 1;
-        }
-    }
-
-    /* Instance whose current location is available sorts first */
-    node1 = current_node1;
-    node2 = current_node2;
-    if (node1 != NULL) {
-        pe_node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id);
-
-        if (match == NULL || match->weight < 0) {
-            crm_trace("%s: current location is unavailable", resource1->id);
-            node1 = NULL;
-            can1 = FALSE;
-        }
-    }
-
-    if (node2 != NULL) {
-        pe_node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id);
-
-        if (match == NULL || match->weight < 0) {
-            crm_trace("%s: current location is unavailable", resource2->id);
-            node2 = NULL;
-            can2 = FALSE;
-        }
-    }
-
-    if (can1 && !can2) {
-        crm_trace("%s < %s: availability of current location", resource1->id,
-                  resource2->id);
-        return -1;
-
-    } else if (!can1 && can2) {
-        crm_trace("%s > %s: availability of current location", resource1->id,
-                  resource2->id);
-        return 1;
-    }
-
-    /* Higher-priority instance sorts first */
-    if (resource1->priority > resource2->priority) {
-        crm_trace("%s < %s: priority", resource1->id, resource2->id);
-        return -1;
-
-    } else if (resource1->priority < resource2->priority) {
-        crm_trace("%s > %s: priority", resource1->id, resource2->id);
-        return 1;
-    }
-
-    /* Active instance sorts first */
-    if (node1 == NULL && node2 == NULL) {
-        crm_trace("%s == %s: not active", resource1->id, resource2->id);
-        return 0;
-
-    } else if (node1 == NULL) {
-        crm_trace("%s > %s: active", resource1->id, resource2->id);
-        return 1;
-
-    } else if (node2 == NULL) {
-        crm_trace("%s < %s: active", resource1->id, resource2->id);
-        return -1;
-    }
-
-    /* Instance whose current node can run resources sorts first */
-    can1 = pcmk__node_available(node1);
-    can2 = pcmk__node_available(node2);
-    if (can1 && !can2) {
-        crm_trace("%s < %s: can", resource1->id, resource2->id);
-        return -1;
-
-    } else if (!can1 && can2) {
-        crm_trace("%s > %s: can", resource1->id, resource2->id);
-        return 1;
-    }
-
-    /* Is the parent allowed to run on the instance's current node?
-     * Instance with parent allowed sorts first.
-     */
-    node1 = parent_node_instance(resource1, node1);
-    node2 = parent_node_instance(resource2, node2);
-    if (node1 == NULL && node2 == NULL) {
-        crm_trace("%s == %s: not allowed", resource1->id, resource2->id);
-        return 0;
-
-    } else if (node1 == NULL) {
-        crm_trace("%s > %s: not allowed", resource1->id, resource2->id);
-        return 1;
-
-    } else if (node2 == NULL) {
-        crm_trace("%s < %s: not allowed", resource1->id, resource2->id);
-        return -1;
-    }
-
-    /* Does one node have more instances allocated?
-     * Instance whose current node has fewer instances sorts first.
-     */
-    if (node1->count < node2->count) {
-        crm_trace("%s < %s: count", resource1->id, resource2->id);
-        return -1;
-
-    } else if (node1->count > node2->count) {
-        crm_trace("%s > %s: count", resource1->id, resource2->id);
-        return 1;
-    }
-
-    /* Failed instance sorts first */
-    can1 = did_fail(resource1);
-    can2 = did_fail(resource2);
-    if (can1 && !can2) {
-        crm_trace("%s > %s: failed", resource1->id, resource2->id);
-        return 1;
-    } else if (!can1 && can2) {
-        crm_trace("%s < %s: failed", resource1->id, resource2->id);
-        return -1;
-    }
-
-    rc = order_instance_by_colocation(resource1, resource2, data_set);
-    if (rc != 0) {
-        return rc;
-    }
-
-    /* Default to lexicographic order by ID */
-    rc = strcmp(resource1->id, resource2->id);
-    crm_trace("%s %c %s: default", resource1->id, rc < 0 ? '<' : '>', resource2->id);
-    return rc;
-}
-
 static pe_node_t *
 can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit)
 {
     pe_node_t *local_node = NULL;
 
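     /* With no node specified, check (and update the score for) each allowed
      * node, then return NULL
      */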
     if (node == NULL && rsc->allowed_nodes) {
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) {
             can_run_instance(rsc, local_node, limit);
         }
         return NULL;
     }
 
     if (!node) {
         /* make clang analyzer happy */
         goto bail;
 
     } else if (!pcmk__node_available(node)) {
         goto bail;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         goto bail;
     }
 
-    local_node = parent_node_instance(rsc, node);
+    local_node = pcmk__top_allowed_node(rsc, node);
 
     if (local_node == NULL) {
         crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
         goto bail;
 
     } else if (local_node->weight < 0) {
         common_update_score(rsc, node->details->id, local_node->weight);
         pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.",
                      rsc->id, node->details->uname);
 
     } else if (local_node->count < limit) {
         pe_rsc_trace(rsc, "%s can run on %s (already running %d)",
                      rsc->id, node->details->uname, local_node->count);
         return local_node;
 
     } else {
         pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)",
                      rsc->id, node->details->uname, local_node->count, limit);
     }
 
   bail:
     if (node) {
         common_update_score(rsc, node->details->id, -INFINITY);
     }
     return NULL;
 }
 
 static pe_node_t *
 allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc,
                   int limit, pe_working_set_t *data_set)
 {
     pe_node_t *chosen = NULL;
     GHashTable *backup = NULL;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)",
                  rsc->id, (prefer? prefer->details->uname: "none"),
                  (all_coloc? "all" : "some"));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->fns->location(rsc, NULL, FALSE);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     /* Only include positive colocation preferences of dependent resources
      * if not every node will get a copy of the clone
      */
     append_parent_colocation(rsc->parent, rsc, all_coloc);
 
     if (prefer) {
         pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (local_prefer == NULL || local_prefer->weight < 0) {
             pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id,
                          prefer->details->uname);
             return NULL;
         }
     }
 
     can_run_instance(rsc, NULL, limit);
 
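     /* Copy the allowed node table, so the assignment can be rolled back if a
      * preferred node was requested but a different node gets chosen
      */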
     backup = pcmk__copy_node_table(rsc->allowed_nodes);
     pe_rsc_trace(rsc, "Allocating instance %s", rsc->id);
     chosen = rsc->cmds->allocate(rsc, prefer, data_set);
     if (chosen && prefer && (chosen->details != prefer->details)) {
         crm_info("Not pre-allocating %s to %s because %s is better",
                  rsc->id, prefer->details->uname, chosen->details->uname);
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = backup;
         pcmk__unassign_resource(rsc);
         chosen = NULL;
         backup = NULL;
     }
     if (chosen) {
-        pe_node_t *local_node = parent_node_instance(rsc, chosen);
+        pe_node_t *local_node = pcmk__top_allowed_node(rsc, chosen);
 
         if (local_node) {
             local_node->count++;
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             /* This shouldn't happen, but if it does, we can't enforce
              * per-node instance limits in this case
              */
             pcmk__config_err("%s not found in %s (list of %d)",
                              chosen->details->id, rsc->parent->id,
                              g_hash_table_size(rsc->parent->allowed_nodes));
         }
     }
 
     if(backup) {
         g_hash_table_destroy(backup);
     }
     return chosen;
 }
 
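 /* Copy a parent's colocation constraints onto a child instance. When 'all'
  * is false, only negative preferences (plus the parent's own mandatory
  * colocations) are copied.
  */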
 static void
 append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all)
 {
 
     GList *gIter = NULL;
 
     gIter = rsc->rsc_cons;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (all || cons->score < 0 || cons->score == INFINITY) {
             child->rsc_cons = g_list_prepend(child->rsc_cons, cons);
         }
     }
 
     gIter = rsc->rsc_cons_lhs;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(cons, child)) {
            continue;
         }
         if (all || cons->score < 0) {
             child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons);
         }
     }
 }
 
 
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set);
 
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set)
 {
     int loop_max = 0;
     int allocated = 0;
     int available_nodes = 0;
     bool all_coloc = false;
 
     /* Each node's "count" will now track the number of instances allocated
      * to it
      */
     for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) {
         pe_node_t *node = nIter->data;
 
         node->count = 0;
         if (pcmk__node_available(node)) {
             available_nodes++;
         }
     }
 
     all_coloc = (max < available_nodes);
 
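     /* loop_max is the ideal number of instances per available node: the
      * total number of instances divided by the node count, but at least 1
      */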
     if(available_nodes) {
         loop_max = max / available_nodes;
     }
     if (loop_max < 1) {
         loop_max = 1;
     }
 
     pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)",
                  max, rsc->id, available_nodes, per_host_max, loop_max);
 
     /* Pre-allocate as many instances as we can to their current location */
     for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         pe_node_t *child_node = NULL;
         pe_node_t *local_node = NULL;
 
         if ((child->running_on == NULL)
             || !pcmk_is_set(child->flags, pe_rsc_provisional)
             || pcmk_is_set(child->flags, pe_rsc_failed)) {
 
             continue;
         }
 
         child_node = pe__current_node(child);
-        local_node = parent_node_instance(child, child_node);
+        local_node = pcmk__top_allowed_node(child, child_node);
 
         pe_rsc_trace(rsc,
                      "Checking pre-allocation of %s to %s (%d remaining of %d)",
                      child->id, child_node->details->uname, max - allocated,
                      max);
 
         if (!pcmk__node_available(child_node) || (child_node->weight < 0)) {
             pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s",
                          child_node->details->uname, child->id);
             continue;
         }
 
         if ((local_node != NULL) && (local_node->count >= loop_max)) {
             pe_rsc_trace(rsc,
                          "Not pre-allocating because %s already allocated "
                          "optimal instances", child_node->details->uname);
             continue;
         }
 
         if (allocate_instance(child, child_node, all_coloc, per_host_max,
                               data_set)) {
             pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id,
                          child_node->details->uname);
             allocated++;
         }
     }
 
     pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (child->running_on != NULL) {
             pe_node_t *child_node = pe__current_node(child);
-            pe_node_t *local_node = parent_node_instance(child, child_node);
+            pe_node_t *local_node = pcmk__top_allowed_node(child, child_node);
 
             if (local_node == NULL) {
                 crm_err("%s is running on %s which isn't allowed",
                         child->id, child_node->details->uname);
             }
         }
 
         if (!pcmk_is_set(child->flags, pe_rsc_provisional)) {
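             // Already assigned, so there is nothing to do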
         } else if (allocated >= max) {
             pe_rsc_debug(rsc, "Child %s not allocated - limit reached %d %d", child->id, allocated, max);
             resource_location(child, NULL, -INFINITY, "clone:limit_reached", data_set);
         } else {
             if (allocate_instance(child, NULL, all_coloc, per_host_max,
                                   data_set)) {
                 allocated++;
             }
         }
     }
 
     pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d",
                  allocated, rsc->id, max);
 }
 
 
 pe_node_t *
 pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                      pe_working_set_t *data_set)
 {
     GList *nodes = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return NULL;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__add_promotion_scores(rsc);
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
-    /* this information is used by sort_clone_instance() when deciding in which 
-     * order to allocate clone instances
+    /* This information is used by pcmk__cmp_instance() when deciding the order
+     * in which to assign clone instances to nodes.
      */
     for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         pe_rsc_trace(rsc, "%s: Allocating %s first",
                      rsc->id, constraint->primary->id);
         constraint->primary->cmds->allocate(constraint->primary, prefer,
                                             data_set);
     }
 
     for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(constraint, NULL)) {
             continue;
         }
         rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
             constraint->dependent, rsc->id, rsc->allowed_nodes,
             constraint->node_attribute, (float)constraint->score / INFINITY,
             (pe_weights_rollback | pe_weights_positive));
     }
 
     pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, data_set);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL, data_set);
-    rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set);
+    rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
     distribute_children(rsc, rsc->children, nodes, clone_data->clone_max,
                         clone_data->clone_node_max, data_set);
     g_list_free(nodes);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__set_instance_roles(rsc, data_set);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
     pe_rsc_trace(rsc, "Done allocating %s", rsc->id);
     return NULL;
 }
 
 static void
 clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting,
                            gboolean * active)
 {
     GList *gIter = NULL;
 
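     /* For collective resources, recursively aggregate the children's status */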
     if (rsc->children) {
 
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             clone_update_pseudo_status(child, stopping, starting, active);
         }
 
         return;
     }
 
     CRM_ASSERT(active != NULL);
     CRM_ASSERT(starting != NULL);
     CRM_ASSERT(stopping != NULL);
 
     if (rsc->running_on) {
         *active = TRUE;
     }
 
     gIter = rsc->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (*starting && *stopping) {
             return;
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid);
             continue;
 
         } else if (!pcmk_any_flags_set(action->flags,
                                        pe_action_pseudo|pe_action_runnable)) {
             pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid);
             continue;
 
         } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) {
             pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid);
             *stopping = TRUE;
 
         } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) {
             if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
             } else {
                 pe_rsc_trace(rsc, "Starting due to: %s", action->uuid);
                 pe_rsc_trace(rsc, "%s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
                 *starting = TRUE;
             }
         }
     }
 }
 
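 /*!
  * \internal
  * \brief Find a resource's only non-optional action of a given type
  *
  * \param[in] rsc   Resource whose actions should be searched
  * \param[in] task  Action name to search for
  *
  * \return The sole non-optional action of the requested type, or NULL if
  *         there are none or more than one
  */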
 static pe_action_t *
 find_rsc_action(pe_resource_t *rsc, const char *task)
 {
     pe_action_t *match = NULL;
     GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
 
     for (GList *item = actions; item != NULL; item = item->next) {
         pe_action_t *op = (pe_action_t *) item->data;
 
         if (!pcmk_is_set(op->flags, pe_action_optional)) {
             if (match != NULL) {
                 // More than one match, don't return any
                 match = NULL;
                 break;
             }
             match = op;
         }
     }
     g_list_free(actions);
     return match;
 }
 
 static void
 child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_action_t *stop = NULL;
     pe_action_t *start = NULL;
     pe_action_t *last_stop = NULL;
     pe_action_t *last_start = NULL;
     GList *gIter = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (clone_data->ordered == FALSE) {
         return;
     }
     /* we have to maintain a consistent sorted child list when building order constraints */
-    rsc->children = g_list_sort(rsc->children, sort_rsc_id);
+    rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         stop = find_rsc_action(child, RSC_STOP);
         if (stop) {
             if (last_stop) {
                 /* child/child relative stop */
                 order_actions(stop, last_stop, pe_order_optional);
             }
             last_stop = stop;
         }
 
         start = find_rsc_action(child, RSC_START);
         if (start) {
             if (last_start) {
                 /* child/child relative start */
                 order_actions(last_start, start, pe_order_optional);
             }
             last_start = start;
         }
     }
 }
 
 void
 clone_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
     clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify,
                                 &clone_data->stop_notify, data_set);
     child_ordering_constraints(rsc, data_set);
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         create_promotable_actions(rsc, data_set);
     }
 }
 
 void
 clone_create_pseudo_actions(pe_resource_t *rsc, GList *children,
                             notify_data_t **start_notify,
                             notify_data_t **stop_notify,
                             pe_working_set_t *data_set)
 {
     gboolean child_active = FALSE;
     gboolean child_starting = FALSE;
     gboolean child_stopping = FALSE;
     gboolean allow_dependent_migrations = TRUE;
 
     pe_action_t *stop = NULL;
     pe_action_t *stopped = NULL;
 
     pe_action_t *start = NULL;
     pe_action_t *started = NULL;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean starting = FALSE;
         gboolean stopping = FALSE;
 
         child_rsc->cmds->create_actions(child_rsc, data_set);
         clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active);
         if (stopping && starting) {
             allow_dependent_migrations = FALSE;
         }
 
         child_stopping |= stopping;
         child_starting |= starting;
     }
 
     /* start */
     start = pcmk__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true);
     started = pcmk__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting,
                                           false);
     started->priority = INFINITY;
 
     if (child_active || child_starting) {
         pe__set_action_flags(started, pe_action_runnable);
     }
 
     if (start_notify != NULL && *start_notify == NULL) {
         *start_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_START, start,
                                                      started);
     }
 
     /* stop */
     stop = pcmk__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true);
     stopped = pcmk__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping,
                                           true);
     stopped->priority = INFINITY;
     if (allow_dependent_migrations) {
         pe__set_action_flags(stop, pe_action_migrate_runnable);
     }
 
     if (stop_notify != NULL && *stop_notify == NULL) {
         *stop_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_STOP, stop,
                                                     stopped);
 
         if (start_notify && *start_notify && *stop_notify) {
             order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional);
         }
     }
 }
 
 void
 clone_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_resource_t *last_rsc = NULL;
     GList *gIter;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional, data_set);
     pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
                                  pe_order_runnable_left, data_set);
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
                                  pe_order_runnable_left, data_set);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
                                      pe_order_optional, data_set);
         pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
                                      pe_order_runnable_left, data_set);
     }
 
     if (clone_data->ordered) {
         /* we have to maintain a consistent sorted child list when building order constraints */
-        rsc->children = g_list_sort(rsc->children, sort_rsc_id);
+        rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     }
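     /* Order each child's start and stop relative to the clone's own
      * pseudo-actions, and (for ordered clones) to the previous child's
      */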
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->internal_constraints(child_rsc, data_set);
 
         pcmk__order_starts(rsc, child_rsc,
                            pe_order_runnable_left|pe_order_implies_first_printed,
                            data_set);
         pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
                                      pe_order_implies_then_printed, data_set);
         if (clone_data->ordered && last_rsc) {
             pcmk__order_starts(last_rsc, child_rsc, pe_order_optional,
                                data_set);
         }
 
         pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed,
                           data_set);
         pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      pe_order_implies_then_printed, data_set);
         if (clone_data->ordered && last_rsc) {
             pcmk__order_stops(child_rsc, last_rsc, pe_order_optional, data_set);
         }
 
         last_rsc = child_rsc;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         promotable_constraints(rsc, data_set);
     }
 }
 
 gboolean
 is_child_compatible(pe_resource_t *child_rsc, pe_node_t *local_node,
                     enum rsc_role_e filter, gboolean current)
 {
     pe_node_t *node = NULL;
     enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current);
 
     CRM_CHECK(child_rsc && local_node, return FALSE);
     if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
         /* We only want instances that haven't failed */
         node = child_rsc->fns->location(child_rsc, NULL, current);
     }
 
     if (filter != RSC_ROLE_UNKNOWN && next_role != filter) {
         crm_trace("Filtered %s", child_rsc->id);
         return FALSE;
     }
 
     if (node && (node->details == local_node->details)) {
         return TRUE;
 
     } else if (node) {
         crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname,
                   local_node->details->uname);
 
     } else {
         crm_trace("%s - not allocated %d", child_rsc->id, current);
     }
     return FALSE;
 }
 
 pe_resource_t *
 find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc,
                       enum rsc_role_e filter, gboolean current,
                       pe_working_set_t *data_set)
 {
     pe_resource_t *pair = NULL;
     GList *gIter = NULL;
     GList *scratch = NULL;
     pe_node_t *local_node = NULL;
 
     local_node = local_child->fns->location(local_child, NULL, current);
     if (local_node) {
         return find_compatible_child_by_node(local_child, local_node, rsc, filter, current);
     }
 
     scratch = g_hash_table_get_values(local_child->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL, data_set);
 
     gIter = scratch;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = find_compatible_child_by_node(local_child, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id);
   done:
     g_list_free(scratch);
     return pair;
 }
 
 void
 clone_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
                         pcmk__colocation_t *constraint,
                         pe_working_set_t *data_set)
 {
     /* -- Never called --
      *
      * Instead we add the colocation constraints to the child and call from there
      */
     CRM_ASSERT(FALSE);
 }
 
 void
 clone_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
                         pcmk__colocation_t *constraint,
                         pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     gboolean do_interleave = FALSE;
     const char *interleave_s = NULL;
 
     CRM_CHECK(constraint != NULL, return);
     CRM_CHECK(dependent != NULL,
               pe_err("dependent was NULL for %s", constraint->id); return);
     CRM_CHECK(primary != NULL,
               pe_err("primary was NULL for %s", constraint->id); return);
     CRM_CHECK(dependent->variant == pe_native, return);
 
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  constraint->id, dependent->id, primary->id, constraint->score);
 
     if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
         if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
             pe_rsc_trace(primary, "%s is still provisional", primary->id);
             return;
         } else if (constraint->primary_role == RSC_ROLE_UNKNOWN) {
             pe_rsc_trace(primary, "Handling %s as a clone colocation",
                          constraint->id);
         } else {
             promotable_colocation_rh(dependent, primary, constraint, data_set);
             return;
         }
     }
 
     /* Only the dependent (LHS) resource needs to be marked for interleaving */
     interleave_s = g_hash_table_lookup(constraint->dependent->meta,
                                        XML_RSC_ATTR_INTERLEAVE);
     if (crm_is_true(interleave_s)
         && (constraint->dependent->variant > pe_group)) {
         /* @TODO Do we actually care about multiple primary copies sharing a
          * dependent copy anymore?
          */
         if (copies_per_node(constraint->dependent) != copies_per_node(constraint->primary)) {
             pcmk__config_err("Cannot interleave %s and %s because they do not "
                              "support the same number of instances per node",
                              constraint->dependent->id,
                              constraint->primary->id);
 
         } else {
             do_interleave = TRUE;
         }
     }
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (do_interleave) {
         pe_resource_t *primary_instance = NULL;
 
         primary_instance = find_compatible_child(dependent, primary,
                                                  RSC_ROLE_UNKNOWN, FALSE,
                                                  data_set);
         if (primary_instance != NULL) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_instance->id);
             dependent->cmds->rsc_colocation_lh(dependent, primary_instance,
                                                constraint, data_set);
 
         } else if (constraint->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
 
     } else if (constraint->score >= INFINITY) {
         GList *affected_nodes = NULL;
 
         gIter = primary->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
             pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
 
             if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
                 pe_rsc_trace(primary, "Allowing %s: %s %d",
                              constraint->id, chosen->details->uname,
                              chosen->weight);
                 affected_nodes = g_list_prepend(affected_nodes, chosen);
             }
         }
 
         node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
         g_list_free(affected_nodes);
         return;
     }
 
     gIter = primary->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_colocation_rh(dependent, child_rsc, constraint,
                                            data_set);
     }
 }
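
 /* For illustration only (a hypothetical configuration, not part of this
  * change): a mandatory colocation such as
  *
  *   <rsc_colocation id="web-with-clone" rsc="web" with-rsc="myclone"
  *                   score="INFINITY"/>
  *
  * reaches the code above with "web" as the dependent and "myclone" as the
  * primary; the INFINITY branch then bans "web" from every node where no
  * active, unblocked instance of "myclone" has been placed.
  */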
 
 enum action_tasks
 clone_child_action(pe_action_t * action)
 {
     enum action_tasks result = no_action;
     pe_resource_t *child = (pe_resource_t *) action->rsc->children->data;
 
     if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) {
 
         /* Find the action we're notifying about instead */
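         /* Worked example (illustrative key only): for UUID
          * "myclone_pre_notify_start_0", the task being notified about
          * ("start") sits between the last two underscores. The scan below
          * records the final '_' as "stop", then copies the substring after
          * the next '_' it finds, yielding "start".
          */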
 
         int stop = 0;
         char *key = action->uuid;
         int lpc = strlen(key);
 
         for (; lpc > 0; lpc--) {
             if (key[lpc] == '_' && stop == 0) {
                 stop = lpc;
 
             } else if (key[lpc] == '_') {
                 char *task_mutable = NULL;
 
                 lpc++;
                 task_mutable = strdup(key + lpc);
                 task_mutable[stop - lpc] = 0;
 
                 crm_trace("Extracted action '%s' from '%s'", task_mutable, key);
                 result = get_complex_task(child, task_mutable, TRUE);
                 free(task_mutable);
                 break;
             }
         }
 
     } else {
         result = get_complex_task(child, action->task, TRUE);
     }
     return result;
 }
 
 #define pe__clear_action_summary_flags(flags, action, flag) do {        \
         flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                      "Action summary", action->rsc->id, \
                                      flags, flag, #flag);               \
     } while (0)
 
 enum pe_action_flags
 summary_action_flags(pe_action_t * action, GList *children, pe_node_t * node)
 {
     GList *gIter = NULL;
     gboolean any_runnable = FALSE;
     gboolean check_runnable = TRUE;
     enum action_tasks task = clone_child_action(action);
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
     const char *task_s = task2text(task);
 
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_action_t *child_action = NULL;
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node);
         pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id,
                      node ? node->details->uname : "none", child_action?child_action->uuid:"NA");
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(child_flags, pe_action_optional)) {
                 pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_action_summary_flags(flags, action, pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
             if (pcmk_is_set(child_flags, pe_action_runnable)) {
                 any_runnable = TRUE;
             }
         }
     }
 
     if (check_runnable && any_runnable == FALSE) {
         pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid);
         pe__clear_action_summary_flags(flags, action, pe_action_runnable);
         if (node == NULL) {
             pe__clear_action_flags(action, pe_action_runnable);
         }
     }
 
     return flags;
 }
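
 /* Summary-flag behavior sketch: the clone-level action starts as optional,
  * runnable, and pseudo. It loses "optional" as soon as any instance's
  * corresponding action is mandatory, and loses "runnable" only when no
  * instance's action is runnable. So with three instances where just one
  * needs a real start, the clone-wide start pseudo-action is mandatory yet
  * still runnable.
  */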
 
 enum pe_action_flags
 clone_action_flags(pe_action_t * action, pe_node_t * node)
 {
     return summary_action_flags(action, action->rsc->children, node);
 }
 
 void
 clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     GList *gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
 
     pcmk__apply_location(constraint, rsc);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_location(child_rsc, constraint);
     }
 }
 
 void
 clone_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
 
     pcmk__create_notifications(rsc, clone_data->start_notify);
     pcmk__create_notifications(rsc, clone_data->stop_notify);
     pcmk__create_notifications(rsc, clone_data->promote_notify);
     pcmk__create_notifications(rsc, clone_data->demote_notify);
 
     /* Now that the notifications have been created, we can expand the children */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->expand(child_rsc, data_set);
     }
 
     native_expand(rsc, data_set);
 
     /* The notifications are in the graph now, we can destroy the notify_data */
     pcmk__free_notification_data(clone_data->demote_notify);
     clone_data->demote_notify = NULL;
     pcmk__free_notification_data(clone_data->stop_notify);
     clone_data->stop_notify = NULL;
     pcmk__free_notification_data(clone_data->start_notify);
     clone_data->start_notify = NULL;
     pcmk__free_notification_data(clone_data->promote_notify);
     clone_data->promote_notify = NULL;
 }
 
 // Check whether a resource or any of its children is known on node
 static bool
 rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
 {
     if (rsc->children) {
         for (GList *child_iter = rsc->children; child_iter != NULL;
              child_iter = child_iter->next) {
 
             pe_resource_t *child = (pe_resource_t *) child_iter->data;
 
             if (rsc_known_on(child, node)) {
                 return TRUE;
             }
         }
 
     } else if (rsc->known_on) {
         GHashTableIter iter;
         pe_node_t *known_node = NULL;
 
         g_hash_table_iter_init(&iter, rsc->known_on);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
             if (node->details == known_node->details) {
                 return TRUE;
             }
         }
     }
     return FALSE;
 }
 
 // Look for an instance of clone that is known on node
 static pe_resource_t *
 find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
 {
     for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (rsc_known_on(child, node)) {
             return child;
         }
     }
     return NULL;
 }
 
 // For unique clones, probe each instance separately
 static gboolean
 probe_unique_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete,
                    gboolean force, pe_working_set_t *data_set)
 {
     gboolean any_created = FALSE;
 
     for (GList *child_iter = rsc->children; child_iter != NULL;
          child_iter = child_iter->next) {
 
         pe_resource_t *child = (pe_resource_t *) child_iter->data;
 
         any_created |= child->cmds->create_probe(child, node, complete, force,
                                                  data_set);
     }
     return any_created;
 }
 
 // For anonymous clones, only a single instance needs to be probed
 static gboolean
 probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
                       pe_action_t *complete, gboolean force,
                       pe_working_set_t *data_set)
 {
     // First, check if we probed an instance on this node last time
     pe_resource_t *child = find_instance_on(rsc, node);
 
     // Otherwise, check if we plan to start an instance on this node
     if (child == NULL) {
         for (GList *child_iter = rsc->children; child_iter && !child;
              child_iter = child_iter->next) {
 
             pe_node_t *local_node = NULL;
             pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
 
             if (child_rsc) { /* make clang analyzer happy */
                 local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
                 if (local_node && (local_node->details == node->details)) {
                     child = child_rsc;
                 }
             }
         }
     }
 
     // Otherwise, use the first clone instance
     if (child == NULL) {
         child = rsc->children->data;
     }
     CRM_ASSERT(child);
     return child->cmds->create_probe(child, node, complete, force, data_set);
 }
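
 /* Probe-target selection above, in order of preference: an instance already
  * known on the node, then an instance planned to start there, then (as a
  * last resort) the first instance. Because all instances of an anonymous
  * clone are interchangeable, one probe result is enough.
  */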
 
 gboolean
 clone_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
                    gboolean force, pe_working_set_t * data_set)
 {
     gboolean any_created = FALSE;
 
     CRM_ASSERT(rsc);
 
-    rsc->children = g_list_sort(rsc->children, sort_rsc_id);
+    rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     if (rsc->children == NULL) {
         pe_warn("Clone %s has no children", rsc->id);
         return FALSE;
     }
 
     if (rsc->exclusive_discover) {
         pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
             /* exclusive discover is enabled and this node is not marked
              * as a node this resource should be discovered on
              *
              * remove the node from allowed_nodes so that the
              * notification contains only nodes that we might ever run
              * on
              */
             g_hash_table_remove(rsc->allowed_nodes, node->details->id);
 
             /* Bit of a shortcut - might as well take it */
             return FALSE;
         }
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         any_created = probe_unique_clone(rsc, node, complete, force, data_set);
     } else {
         any_created = probe_anonymous_clone(rsc, node, complete, force,
                                             data_set);
     }
     return any_created;
 }
 
 void
 clone_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
     char *name = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
     crm_xml_add_int(xml, name, clone_data->clone_max);
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
     crm_xml_add_int(xml, name, clone_data->clone_node_max);
     free(name);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
         crm_xml_add_int(xml, name, clone_data->promoted_max);
         free(name);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
         crm_xml_add_int(xml, name, clone_data->promoted_node_max);
         free(name);
 
         /* @COMPAT Maintain backward compatibility with resource agents that
          * expect the old names (deprecated since 2.0.0).
          */
         name = crm_meta_name(PCMK_XE_PROMOTED_MAX_LEGACY);
         crm_xml_add_int(xml, name, clone_data->promoted_max);
         free(name);
 
         name = crm_meta_name(PCMK_XE_PROMOTED_NODE_MAX_LEGACY);
         crm_xml_add_int(xml, name, clone_data->promoted_node_max);
         free(name);
     }
 }
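
 /* For illustration (hypothetical values): crm_meta_name() prefixes
  * "CRM_meta_" and converts dashes to underscores, so a promotable clone
  * with clone-max=2 would hand its agent CRM_meta_clone_max=2 and
  * CRM_meta_promoted_max, alongside the deprecated legacy names added above.
  */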
 
 // Clone implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                             GList *all_rscs, GHashTable *utilization)
 {
     bool existing = false;
     pe_resource_t *child = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     // Look for any child already existing in the list
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         child = (pe_resource_t *) iter->data;
         if (g_list_find(all_rscs, child)) {
             existing = true; // Keep checking remaining children
         } else {
             // If this is a clone of a group, look for group's members
             for (GList *member_iter = child->children; member_iter != NULL;
                  member_iter = member_iter->next) {
 
                 pe_resource_t *member = (pe_resource_t *) member_iter->data;
 
                 if (g_list_find(all_rscs, member) != NULL) {
                     // Add *child's* utilization, not group member's
                     child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                                  utilization);
                     existing = true;
                     break;
                 }
             }
         }
     }
 
     if (!existing && (rsc->children != NULL)) {
         // If nothing was found, still add first child's utilization
         child = (pe_resource_t *) rsc->children->data;
 
         child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
     }
 }
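
 /* Sketch of the clone-of-group case above (hypothetical IDs): for a clone
  * of group "grp" with members "a" and "b", finding member "a" in all_rscs
  * adds the utilization of the whole group instance once, rather than adding
  * each member's separately.
  */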
 
 // Clone implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__clone_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Clones currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c
index 78e5440fc3..19cba1bc7e 100644
--- a/lib/pacemaker/pcmk_sched_fencing.c
+++ b/lib/pacemaker/pcmk_sched_fencing.c
@@ -1,467 +1,453 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Check whether a resource is known on a particular node
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return TRUE if resource (or parent if an anonymous clone) is known
  */
 static bool
 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
 {
     if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
         return TRUE;

     } else if ((rsc->variant == pe_native)
                && pe_rsc_is_anon_clone(rsc->parent)
                && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
         /* We check only the parent, not the uber-parent, because we cannot
          * assume that the resource is known if it is in an anonymously cloned
          * group (which may be only partially known).
          */
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Order a resource's start and promote actions relative to fencing
  *
  * \param[in] rsc         Resource to be ordered
  * \param[in] stonith_op  Fence action
  * \param[in] data_set    Cluster working set
  */
 static void
 order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op,
                        pe_working_set_t *data_set)
 {
     pe_node_t *target;
     GList *gIter = NULL;
 
     CRM_CHECK(stonith_op && stonith_op->node, return);
     target = stonith_op->node;
 
     for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         switch (action->needs) {
             case rsc_req_nothing:
                 // Anything other than start or promote requires nothing
                 break;
 
             case rsc_req_stonith:
                 order_actions(stonith_op, action, pe_order_optional);
                 break;
 
             case rsc_req_quorum:
                 if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
                     && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
                     && !rsc_is_known_on(rsc, target)) {
 
                     /* If we don't know the status of the resource on the node
                      * we're about to shoot, we have to assume it may be active
                      * there. Order the resource start after the fencing. This
                      * is analogous to waiting for all the probes for a resource
                      * to complete before starting it.
                      *
                      * The most likely explanation is that the DC died and took
                      * its status with it.
                      */
                     pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
                                  target->details->uname);
                     order_actions(stonith_op, action,
                                   pe_order_optional | pe_order_runnable_left);
                 }
                 break;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Order a resource's stop and demote actions relative to fencing
  *
  * \param[in] rsc         Resource to be ordered
  * \param[in] stonith_op  Fence action
  * \param[in] data_set    Cluster working set
  */
 static void
 order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op,
                       pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     GList *action_list = NULL;
     bool order_implicit = false;
 
     pe_resource_t *top = uber_parent(rsc);
     pe_action_t *parent_stop = NULL;
     pe_node_t *target;
 
     CRM_CHECK(stonith_op && stonith_op->node, return);
     target = stonith_op->node;
 
     /* Get a list of stop actions potentially implied by the fencing */
     action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
 
     /* If resource requires fencing, implicit actions must occur after fencing.
      *
      * Implied stops and demotes of resources running on guest nodes are always
      * ordered after fencing, even if the resource does not require fencing,
      * because guest node "fencing" is actually just a resource stop.
      */
     if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
         || pe__is_guest_node(target)) {
 
         order_implicit = true;
     }
 
     if (action_list && order_implicit) {
         parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
     }
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         // The stop would never complete, so convert it into a pseudo-action.
         pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
 
         if (order_implicit) {
             pe__set_action_flags(action, pe_action_implied_by_stonith);
 
             /* Order the stonith before the parent stop (if any).
              *
              * Also order the stonith before the resource stop, unless the
              * resource is inside a bundle -- that would cause a graph loop.
              * We can rely on the parent stop's ordering instead.
              *
              * User constraints must not order a resource in a guest node
              * relative to the guest node container resource. The
              * pe_order_preserve flag marks constraints as generated by the
              * cluster and thus immune to that check (and is irrelevant if
              * target is not a guest).
              */
             if (!pe_rsc_is_bundled(rsc)) {
                 order_actions(stonith_op, action, pe_order_preserve);
             }
             order_actions(stonith_op, parent_stop, pe_order_preserve);
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
                        rsc->id, (order_implicit? "after" : "because"),
                        target->details->uname);
         } else {
             crm_info("%s is implicit %s %s is fenced",
                      action->uuid, (order_implicit? "after" : "because"),
                      target->details->uname);
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
             pcmk__order_notifs_after_fencing(action, rsc, stonith_op);
         }
 
 #if 0
         /* It might be a good idea to stop healthy resources on a node about to
          * be fenced, when possible.
          *
          * However, fencing must be done before a failed resource's
          * (pseudo-)stop action, so that could create a loop. For example, given
          * a group of A and B running on node N with a failed stop of B:
          *
          *    fence N -> stop B (pseudo-op) -> stop A -> fence N
          *
          * The block below creates the stop A -> fence N ordering and therefore
          * must (at least for now) be disabled. Instead, run the block above and
          * treat all resources on N as B would be (i.e., as a pseudo-op after
          * the fencing).
          *
          * @TODO Maybe break the "A requires B" dependency in
          * pcmk__update_action_for_orderings() and use this block for healthy
          * resources instead of the above.
          */
          crm_info("Moving healthy resource %s off %s before fencing",
                   rsc->id, node->details->uname);
          pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL,
                             strdup(CRM_OP_FENCE), stonith_op,
                             pe_order_optional, data_set);
 #endif
     }
 
     g_list_free(action_list);
 
     /* Get a list of demote actions potentially implied by the fencing */
     action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (!(action->node->details->online) || action->node->details->unclean
             || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
 
             if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
                 pe_rsc_info(rsc,
                             "Demote of failed resource %s is implicit after %s is fenced",
                             rsc->id, target->details->uname);
             } else {
                 pe_rsc_info(rsc, "%s is implicit after %s is fenced",
                             action->uuid, target->details->uname);
             }
 
             /* The demote would never complete and is now implied by the
              * fencing, so convert it into a pseudo-action.
              */
             pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
 
             if (pe_rsc_is_bundled(rsc)) {
                 // Do nothing, let recovery be ordered after parent's implied stop
 
             } else if (order_implicit) {
                 order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
             }
         }
     }
 
     g_list_free(action_list);
 }
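
 /* Net effect sketch: stops (and demotes) implied by fencing become
  * pseudo-actions, ordered after the fence when the resource requires
  * fencing or runs on a guest node, so the transition can complete without
  * the fenced node confirming anything.
  */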
 
 /*!
  * \internal
  * \brief Order resource actions properly relative to fencing
  *
  * \param[in] rsc         Resource whose actions should be ordered
  * \param[in] stonith_op  Fencing operation to be ordered against
  * \param[in] data_set    Cluster working set
  */
 static void
 rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op,
                      pe_working_set_t *data_set)
 {
     if (rsc->children) {
         GList *gIter = NULL;
 
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             rsc_stonith_ordering(child_rsc, stonith_op, data_set);
         }
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc,
                      "Skipping fencing constraints for unmanaged resource: %s",
                      rsc->id);
 
     } else {
         order_start_vs_fencing(rsc, stonith_op, data_set);
         order_stop_vs_fencing(rsc, stonith_op, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Order all actions appropriately relative to a fencing operation
  *
  * Ensure start operations of affected resources are ordered after fencing,
  * imply stop and demote operations of affected resources by marking them as
  * pseudo-actions, etc.
  *
  * \param[in]     stonith_op  Fencing operation
  * \param[in,out] data_set    Working set of cluster
  */
 void
 pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
 {
     CRM_CHECK(stonith_op && data_set, return);
     for (GList *r = data_set->resources; r != NULL; r = r->next) {
         rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Order an action after unfencing
  *
  * \param[in] rsc       Resource that action is for
  * \param[in] node      Node that action is on
  * \param[in] action    Action to be ordered after unfencing
  * \param[in] order     Ordering flags
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
                        enum pe_ordering order, pe_working_set_t *data_set)
 {
     /* When unfencing is in use, we order unfence actions before any probe or
      * start of resources that require unfencing, and also of fence devices.
      *
      * This might seem to violate the principle that fence devices require
      * only quorum. However, fence agents that unfence often don't have enough
      * information to even probe or start unless the node is first unfenced.
      */
-    if (pcmk__is_unfence_device(rsc, data_set)
+    if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device)
+         && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing))
         || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
 
         /* Start with an optional ordering. Requiring unfencing would result in
          * the node being unfenced, and all its resources being stopped,
          * whenever a new resource is added -- which would be highly suboptimal.
          */
         pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
 
         order_actions(unfence, action, order);
 
         if (!pcmk__node_unfenced(node)) {
             // But unfencing is required if it has never been done
             char *reason = crm_strdup_printf("required by %s %s",
                                              rsc->id, action->task);
 
             trigger_unfencing(NULL, node, reason, NULL, data_set);
             free(reason);
         }
     }
 }
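
 /* Assumed example: if a node has never been unfenced (no nonzero
  * "#node-unfenced" attribute) and an action covered above is scheduled
  * there, trigger_unfencing() makes the unfencing itself required, so the
  * unfence ("on") operation will precede that action.
  */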
 
 /*!
  * \internal
  * \brief Create pseudo-op for guest node fence, and order relative to it
  *
  * \param[in] node      Guest node to fence
- * \param[in] data_set  Working set of CIB state
  */
 void
-pcmk__fence_guest(pe_node_t *node, pe_working_set_t *data_set)
+pcmk__fence_guest(pe_node_t *node)
 {
-    pe_resource_t *container = node->details->remote_rsc->container;
+    pe_resource_t *container = NULL;
     pe_action_t *stop = NULL;
     pe_action_t *stonith_op = NULL;
 
     /* The fence action is just a label; we don't do anything differently for
      * off vs. reboot. We specify it explicitly, rather than let it default to
      * cluster's default action, because we are not _initiating_ fencing -- we
      * are creating a pseudo-event to describe fencing that is already occurring
      * by other means (container recovery).
      */
     const char *fence_action = "off";
 
+    CRM_ASSERT(node != NULL);
+
     /* Check whether guest's container resource has any explicit stop or
      * start (the stop may be implied by fencing of the guest's host).
      */
+    container = node->details->remote_rsc->container;
     if (container) {
         stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP,
                                  NULL);
 
         if (find_first_action(container->actions, NULL, CRMD_ACTION_START,
                               NULL)) {
             fence_action = "reboot";
         }
     }
 
     /* Create a fence pseudo-event, so we have an event to order actions
      * against, and the controller can always detect it.
      */
     stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean",
-                             FALSE, data_set);
+                             FALSE, node->details->data_set);
     pe__set_action_flags(stonith_op, pe_action_pseudo|pe_action_runnable);
 
     /* We want to imply stops/demotes after the guest is stopped, not wait until
      * it is restarted, so we always order pseudo-fencing after stop, not start
      * (even though start might be closer to what is done for a real reboot).
      */
     if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
         pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
-                                                     NULL, FALSE, data_set);
+                                                     NULL, FALSE,
+                                                     node->details->data_set);
 
         crm_info("Implying guest node %s is down (action %d) after %s fencing",
                  node->details->uname, stonith_op->id,
                  stop->node->details->uname);
         order_actions(parent_stonith_op, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
 
     } else if (stop) {
         order_actions(stop, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
         crm_info("Implying guest node %s is down (action %d) "
                  "after container %s is stopped (action %d)",
                  node->details->uname, stonith_op->id,
                  container->id, stop->id);
     } else {
         /* If we're fencing the guest node but there's no stop for the guest
          * resource, we must think the guest is already stopped. However, we may
          * think so because its resource history was just cleaned. To avoid
          * unnecessarily considering the guest node down if it's really up,
          * order the pseudo-fencing after any stop of the connection resource,
          * which will be ordered after any container (re-)probe.
          */
         stop = find_first_action(node->details->remote_rsc->actions, NULL,
                                  RSC_STOP, NULL);
 
         if (stop) {
             order_actions(stop, stonith_op, pe_order_optional);
             crm_info("Implying guest node %s is down (action %d) "
                      "after connection is stopped (action %d)",
                      node->details->uname, stonith_op->id, stop->id);
         } else {
             /* Not sure why we're fencing, but everything must already be
              * cleanly stopped.
              */
             crm_info("Implying guest node %s is down (action %d) ",
                      node->details->uname, stonith_op->id);
         }
     }
 
     // Order/imply other actions relative to pseudo-fence as with real fence
-    pcmk__order_vs_fence(stonith_op, data_set);
+    pcmk__order_vs_fence(stonith_op, node->details->data_set);
 }
 
 /*!
  * \internal
  * \brief Check whether node has already been unfenced
  *
  * \param[in] node  Node to check
  *
  * \return true if node has a nonzero #node-unfenced attribute,
  *         otherwise false
  */
 bool
 pcmk__node_unfenced(pe_node_t *node)
 {
     const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
 
     return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
 }
-
-/*!
- * \internal
- * \brief Check whether a resource is a fencing device that supports unfencing
- *
- * \param[in] rsc       Resource to check
- * \param[in] data_set  Cluster working set
- *
- * \return true if \p rsc is a fencing device that supports unfencing,
- *         otherwise false
- */
-bool
-pcmk__is_unfence_device(const pe_resource_t *rsc,
-                        const pe_working_set_t *data_set)
-{
-    return pcmk_is_set(rsc->flags, pe_rsc_fence_device)
-           && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing);
-}
diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index 4f17186988..3e7cf160fc 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1,2638 +1,2640 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <pacemaker-internal.h>
 #include <crm/services.h>
 
 #include "libpacemaker_private.h"
 
 // The controller removes the resource from the CIB, making this redundant
 // #define DELETE_THEN_REFRESH 1
 
 #define INFINITY_HACK   (INFINITY * -100)
 
 #define VARIANT_NATIVE 1
 #include <lib/pengine/variant.h>
 
 extern bool pcmk__is_daemon;
 
 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                       pe_working_set_t *data_set);
 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                         xmlNode *operation, pe_working_set_t *data_set);
 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                               pe_working_set_t *data_set);
 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                                 xmlNode *operation, pe_working_set_t *data_set);
 
 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
                     pe_working_set_t * data_set);
 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
 
 /* This array says what the *next* role should be when transitioning from one
  * role to another. For example, going from Stopped to Promoted, the next role
  * is RSC_ROLE_UNPROMOTED, because the resource must be started before it can
  * be promoted. The current role then becomes Unpromoted, which is fed into
  * this array again, giving a next role of RSC_ROLE_PROMOTED.
  */
 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
 /* Current state  Next state*/
 /*                 Unknown           Stopped           Started           Unpromoted           Promoted */
 /* Unknown */    { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED,    RSC_ROLE_STOPPED },
 /* Stopped */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED },
 /* Started */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 /* Unpromoted */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 /* Promoted  */  { RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 };
 
 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
                                       gboolean optional,
                                       pe_working_set_t *data_set);
 
 // This array picks the function needed to transition from one role to another
 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
 /* Current state   Next state                                            */
 /*                 Unknown    Stopped    Started    Unpromoted Promoted  */
 /* Unknown */    { RoleError, StopRsc,   RoleError, RoleError, RoleError,    },
 /* Stopped */    { RoleError, NullOp,    StartRsc,  StartRsc,  RoleError,    },
 /* Started */    { RoleError, StopRsc,   NullOp,    NullOp,    PromoteRsc,   },
 /* Unpromoted */ { RoleError, StopRsc,   StopRsc,   NullOp,    PromoteRsc,   },
 /* Promoted  */  { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp,       },
 };
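
 /* Worked example (descriptive only): recovering a promoted resource onto
  * another node walks Promoted -> Unpromoted -> Stopped on the old node
  * (DemoteRsc, then StopRsc) and Stopped -> Unpromoted -> Promoted on the
  * new one (StartRsc, then PromoteRsc), each hop taken from the two matrices
  * above.
  */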
 
 #define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do {    \
         (nw_flags) = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,   \
                                           "Node weight", (nw_rsc)->id,     \
                                           (nw_flags), (flags_to_clear),    \
                                           #flags_to_clear);                \
     } while (0)
 
 static bool
 native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
 {
     GList *nodes = NULL;
     pe_node_t *chosen = NULL;
     pe_node_t *best = NULL;
     int multiple = 1;
     int length = 0;
     bool result = false;
 
-    pcmk__ban_insufficient_capacity(rsc, &prefer, data_set);
+    pcmk__ban_insufficient_capacity(rsc, &prefer);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to != NULL;
     }
 
     // Sort allowed nodes by weight
     if (rsc->allowed_nodes) {
         length = g_hash_table_size(rsc->allowed_nodes);
     }
     if (length > 0) {
         nodes = g_hash_table_get_values(rsc->allowed_nodes);
         nodes = pcmk__sort_nodes(nodes, pe__current_node(rsc), data_set);
 
         // First node in sorted list has the best score
         best = g_list_nth_data(nodes, 0);
     }
 
     if (prefer && nodes) {
         chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (chosen == NULL) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                          prefer->details->uname, rsc->id);
 
         /* Favor the preferred node as long as its weight is at least as good as
          * the best allowed node's.
          *
          * An alternative would be to favor the preferred node even if the best
          * node is better, when the best node's weight is less than INFINITY.
          */
         } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                          chosen->details->uname, rsc->id);
             chosen = NULL;
 
         } else if (!pcmk__node_available(chosen)) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                          chosen->details->uname, rsc->id);
             chosen = NULL;
 
         } else {
             pe_rsc_trace(rsc,
                          "Chose preferred node %s for %s (ignoring %d candidates)",
                          chosen->details->uname, rsc->id, length);
         }
     }
 
     if ((chosen == NULL) && nodes) {
         /* Either there is no preferred node, or the preferred node is not
          * available, but there are other nodes allowed to run the resource.
          */
 
         chosen = best;
         pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                      chosen ? chosen->details->uname : "<none>", rsc->id, length);
 
         if (!pe_rsc_is_unique_clone(rsc->parent)
             && chosen && (chosen->weight > 0) && pcmk__node_available(chosen)) {
             /* If the resource is already running on a node, prefer that node if
              * it is just as good as the chosen node.
              *
              * We don't do this for unique clone instances, because
              * distribute_children() has already assigned instances to their
              * running nodes when appropriate, and if we get here, we don't want
              * remaining unallocated instances to prefer a node that's already
              * running another instance.
              */
             pe_node_t *running = pe__current_node(rsc);
 
             if ((running != NULL) && !pcmk__node_available(running)) {
                 pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                              rsc->id, running->details->uname);
             } else if (running) {
                 for (GList *iter = nodes->next; iter; iter = iter->next) {
                     pe_node_t *tmp = (pe_node_t *) iter->data;
 
                     if (tmp->weight != chosen->weight) {
                         // The nodes are sorted by weight, so no more are equal
                         break;
                     }
                     if (tmp->details == running->details) {
                         // Scores are equal, so prefer the current node
                         chosen = tmp;
                     }
                     multiple++;
                 }
             }
         }
     }
 
     if (multiple > 1) {
         static char score[33];
         int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
 
         score2char_stack(chosen->weight, score, sizeof(score));
         do_crm_log(log_level,
                    "Chose node %s for %s from %d nodes with score %s",
                    chosen->details->uname, rsc->id, multiple, score);
     }
 
     result = pcmk__assign_primitive(rsc, chosen, false);
     g_list_free(nodes);
     return result;
 }
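
 /* Tie-breaking sketch (hypothetical scores): with node1 and node2 both at
  * 100 and the resource already active on node2, the equal-weight walk above
  * keeps node2, so staying put wins over a pointless move.
  */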
 
 /*!
  * \internal
  * \brief Find score of highest-scored node that matches colocation attribute
  *
  * \param[in] rsc    Resource whose allowed nodes should be searched
  * \param[in] attr   Colocation attribute name (must not be NULL)
  * \param[in] value  Colocation attribute value to require
  */
 static int
 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
                               const char *value)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     int best_score = -INFINITY;
     const char *best_node = NULL;
 
     // Find best allowed node with matching attribute
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
 
         if ((node->weight > best_score) && pcmk__node_available(node)
             && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
 
             best_score = node->weight;
             best_node = node->details->uname;
         }
     }
 
     if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
         if (best_node == NULL) {
             crm_info("No allowed node for %s matches node attribute %s=%s",
                      rsc->id, attr, value);
         } else {
             crm_info("Allowed node %s for %s had best score (%d) "
                      "of those matching node attribute %s=%s",
                      best_node, rsc->id, best_score, attr, value);
         }
     }
     return best_score;
 }
 
 /*!
  * \internal
  * \brief Add resource's colocation matches to current node allocation scores
  *
  * For each node in a given table, if any of a given resource's allowed nodes
  * have a matching value for the colocation attribute, add the highest of those
  * nodes' scores to the node's score.
  *
  * \param[in,out] nodes  Hash table of nodes with allocation scores so far
  * \param[in]     rsc    Resource whose allowed nodes should be compared
  * \param[in]     attr   Colocation attribute that must match (NULL for default)
  * \param[in]     factor Factor by which to multiply scores being added
  * \param[in]     only_positive  Whether to add only positive scores
  */
 static void
 add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
                               const char *attr, float factor,
                               bool only_positive)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (attr == NULL) {
         attr = CRM_ATTR_UNAME;
     }
 
     // Iterate through each node
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         float weight_f = 0;
         int weight = 0;
         int score = 0;
         int new_score = 0;
 
         score = best_node_score_matching_attr(rsc, attr,
                                               pe_node_attribute_raw(node, attr));
 
         if ((factor < 0) && (score < 0)) {
             /* Negative preference for a node with a negative score
              * should not become a positive preference.
              *
              * @TODO Consider filtering only if weight is -INFINITY
              */
             crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
                       node->details->uname, node->weight, factor, score);
             continue;
         }
 
         if (node->weight == INFINITY_HACK) {
             crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
                       node->details->uname, node->weight, factor, score);
             continue;
         }
 
         weight_f = factor * score;
 
         // Round the number; see http://c-faq.com/fp/round.html
         weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
 
         /* Small factors can obliterate the small scores that are often actually
          * used in configurations. If the score and factor are nonzero, ensure
          * that the result is nonzero as well.
          */
         if ((weight == 0) && (score != 0)) {
             if (factor > 0.0) {
                 weight = 1;
             } else if (factor < 0.0) {
                 weight = -1;
             }
         }
 
         new_score = pcmk__add_scores(weight, node->weight);
 
         if (only_positive && (new_score < 0) && (node->weight > 0)) {
             crm_trace("%s: Filtering %d + %f * %d = %d "
                       "(negative disallowed, marking node unusable)",
                       node->details->uname, node->weight, factor, score,
                       new_score);
             node->weight = INFINITY_HACK;
             continue;
         }
 
         if (only_positive && (new_score < 0) && (node->weight == 0)) {
             crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
                       node->details->uname, node->weight, factor, score,
                       new_score);
             continue;
         }
 
         crm_trace("%s: %d + %f * %d = %d", node->details->uname,
                   node->weight, factor, score, new_score);
         node->weight = new_score;
     }
 }
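
 /* Numeric sketch: with factor 0.5 and a best matching score of 7, the
  * contribution is round(3.5) = 4; with factor 0.01 the product rounds to 0,
  * so the nonzero-preservation rule above bumps it to 1 instead.
  */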
 
 static inline bool
 is_nonempty_group(pe_resource_t *rsc)
 {
     return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
 }
 
 /*!
  * \internal
  * \brief Incorporate colocation constraint scores into node weights
  *
  * \param[in,out] rsc         Resource being placed
  * \param[in]     primary_id  ID of primary resource in constraint
  * \param[in,out] nodes       Nodes, with scores as of this point
  * \param[in]     attr        Colocation attribute (ID by default)
  * \param[in]     factor      Incorporate scores multiplied by this factor
  * \param[in]     flags       Bitmask of enum pe_weights values
  *
  * \return Nodes, with scores modified by this constraint
  * \note This function assumes ownership of the nodes argument. The caller
  *       should free the returned copy rather than the original.
  */
 GHashTable *
 pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id,
                            GHashTable *nodes, const char *attr, float factor,
                            uint32_t flags)
 {
     GHashTable *work = NULL;
 
     // Avoid infinite recursion
     if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
                     primary_id, rsc->id);
         return nodes;
     }
     pe__set_resource_flags(rsc, pe_rsc_merging);
 
     if (pcmk_is_set(flags, pe_weights_init)) {
         if (is_nonempty_group(rsc)) {
             GList *last = g_list_last(rsc->children);
             pe_resource_t *last_rsc = last->data;
 
             pe_rsc_trace(rsc, "%s: Merging scores from group %s "
                          "using last member %s (at %.6f)",
                          primary_id, rsc->id, last_rsc->id, factor);
             work = pcmk__native_merge_weights(last_rsc, primary_id, NULL, attr,
                                               factor, flags);
         } else {
             work = pcmk__copy_node_table(rsc->allowed_nodes);
         }
         clear_node_weights_flags(flags, rsc, pe_weights_init);
 
     } else if (is_nonempty_group(rsc)) {
         /* The first member of the group will recursively incorporate any
          * constraints involving other members (including the group internal
          * colocation).
          *
          * @TODO The indirect colocations from the dependent group's other
          *       members will be incorporated at full strength rather than by
          *       factor, so the group's combined stickiness will be treated as
          *       (factor + (#members - 1)) * stickiness. It is questionable what
          *       the right approach should be.
          */
         pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
                      "(at %.6f)", primary_id, rsc->id, factor);
         work = pcmk__copy_node_table(nodes);
         work = pcmk__native_merge_weights(rsc->children->data, primary_id, work,
                                           attr, factor, flags);
 
     } else {
         pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
                      primary_id, rsc->id, factor);
         work = pcmk__copy_node_table(nodes);
         add_node_scores_matching_attr(work, rsc, attr, factor,
                                       pcmk_is_set(flags, pe_weights_positive));
     }
 
     if (pcmk__any_node_available(work)) {
         GList *gIter = NULL;
         int multiplier = (factor < 0)? -1 : 1;
 
         if (pcmk_is_set(flags, pe_weights_forward)) {
             gIter = rsc->rsc_cons;
             pe_rsc_trace(rsc,
                          "Checking additional %d optional '%s with' constraints",
                          g_list_length(gIter), rsc->id);
 
         } else if (is_nonempty_group(rsc)) {
             pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
 
             gIter = last_rsc->rsc_cons_lhs;
             pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
                          "constraints using last member %s",
                          g_list_length(gIter), rsc->id, last_rsc->id);
 
         } else {
             gIter = rsc->rsc_cons_lhs;
             pe_rsc_trace(rsc,
                          "Checking additional %d optional 'with %s' constraints",
                          g_list_length(gIter), rsc->id);
         }
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *other = NULL;
             pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
             if (pcmk_is_set(flags, pe_weights_forward)) {
                 other = constraint->primary;
             } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
                 continue;
             } else {
                 other = constraint->dependent;
             }
 
             pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
                          constraint->id, constraint->dependent->id,
                          constraint->primary->id);
             work = pcmk__native_merge_weights(other, primary_id, work,
                                               constraint->node_attribute,
                                               multiplier * constraint->score / (float) INFINITY,
                                               flags|pe_weights_rollback);
             pe__show_node_weights(true, NULL, primary_id, work, rsc->cluster);
         }
 
     } else if (pcmk_is_set(flags, pe_weights_rollback)) {
         pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
                     primary_id, rsc->id);
         g_hash_table_destroy(work);
         pe__clear_resource_flags(rsc, pe_rsc_merging);
         return nodes;
     }

     if (pcmk_is_set(flags, pe_weights_positive)) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, work);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node->weight == INFINITY_HACK) {
                 node->weight = 1;
             }
         }
     }
 
     if (nodes) {
         g_hash_table_destroy(nodes);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_merging);
     return work;
 }
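
 /* Callers pass factor = colocation score / INFINITY, so an advisory
  * colocation of score 5000 is merged at strength 0.005 while a mandatory
  * (INFINITY) one is merged at full strength 1.0; see the rsc_cons_lhs loop
  * in pcmk__native_allocate() below.
  */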
 
 pe_node_t *
 pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                       pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
 
     if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
         /* never allocate children on their own */
         pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                      rsc->parent->id);
         rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
 
     for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         GHashTable *archive = NULL;
         pe_resource_t *primary = constraint->primary;
 
         if ((constraint->dependent_role >= RSC_ROLE_PROMOTED)
             || (constraint->score < 0 && constraint->score > -INFINITY)) {
             archive = pcmk__copy_node_table(rsc->allowed_nodes);
         }
 
         pe_rsc_trace(rsc,
                      "%s: Allocating %s first (constraint=%s score=%d role=%s)",
                      rsc->id, primary->id, constraint->id,
                      constraint->score, role2text(constraint->dependent_role));
         primary->cmds->allocate(primary, NULL, data_set);
         rsc->cmds->rsc_colocation_lh(rsc, primary, constraint, data_set);
         if (archive && !pcmk__any_node_available(rsc->allowed_nodes)) {
             pe_rsc_info(rsc, "%s: Rolling back scores from %s",
                         rsc->id, primary->id);
             g_hash_table_destroy(rsc->allowed_nodes);
             rsc->allowed_nodes = archive;
             archive = NULL;
         }
         if (archive) {
             g_hash_table_destroy(archive);
         }
     }
 
     pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
 
     for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(constraint, NULL)) {
             continue;
         }
         pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
                      constraint->id, constraint->dependent->id,
                      constraint->primary->id);
         rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
             constraint->dependent, rsc->id, rsc->allowed_nodes,
             constraint->node_attribute, constraint->score / (float) INFINITY,
             pe_weights_rollback);
     }
 
     if (rsc->next_role == RSC_ROLE_STOPPED) {
         pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
         /* make sure it doesn't come up again */
         resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
 
     } else if (rsc->next_role > rsc->role
               && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
               && data_set->no_quorum_policy == no_quorum_freeze) {
         crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                    rsc->id, role2text(rsc->role), role2text(rsc->next_role));
         pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
     }
 
     pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, data_set);
     if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
         && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         const char *reason = NULL;
         pe_node_t *assign_to = NULL;
 
         pe__set_next_role(rsc, rsc->role, "unmanaged");
         assign_to = pe__current_node(rsc);
         if (assign_to == NULL) {
             reason = "inactive";
         } else if (rsc->role == RSC_ROLE_PROMOTED) {
             reason = "promoted";
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             reason = "failed";
         } else {
             reason = "active";
         }
         pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                     (assign_to? assign_to->details->uname : "no node"), reason);
         pcmk__assign_primitive(rsc, assign_to, true);
 
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
         pcmk__assign_primitive(rsc, NULL, true);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
                && native_choose_node(rsc, prefer, data_set)) {
         pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                      rsc->allocated_to->details->uname);
 
     } else if (rsc->allocated_to == NULL) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
         } else if (rsc->running_on != NULL) {
             pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
         }
 
     } else {
         pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                      rsc->allocated_to->details->uname);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating);
 
     if (rsc->is_remote_node) {
         pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
 
         CRM_ASSERT(remote_node != NULL);
         if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
             crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                       remote_node->details->id);
             remote_node->details->online = TRUE;
             /* We shouldn't consider an unseen remote node unclean if we are
              * going to try to connect to it. Otherwise we get an unnecessary
              * fence. */
             if (remote_node->details->unseen == TRUE) {
                 remote_node->details->unclean = FALSE;
             }
 
         } else {
             crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                       remote_node->details->id, role2text(rsc->next_role),
                       (rsc->allocated_to? "" : "un"));
             remote_node->details->shutdown = TRUE;
         }
     }
 
     return rsc->allocated_to;
 }
 
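 /*!
  * \internal
  * \brief Check whether a resource has a duplicate operation configured
  *
  * \param[in] rsc          Resource to check
  * \param[in] name         Action name of operation to check for
  * \param[in] interval_ms  Interval (in milliseconds) of operation to check for
  *
  * \return TRUE if more than one operation with the given name and interval is
  *         configured (which is also logged as a configuration error),
  *         otherwise FALSE
  */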
 static gboolean
 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
 {
     gboolean dup = FALSE;
     const char *id = NULL;
     const char *value = NULL;
     xmlNode *operation = NULL;
     guint interval2_ms = 0;
 
     CRM_ASSERT(rsc);
     for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
          operation = pcmk__xe_next(operation)) {
 
         if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
             value = crm_element_value(operation, "name");
             if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
                 continue;
             }
 
             value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             interval2_ms = crm_parse_interval_spec(value);
             if (interval_ms != interval2_ms) {
                 continue;
             }
 
             if (id == NULL) {
                 id = ID(operation);
 
             } else {
                 pcmk__config_err("Operation %s is duplicate of %s (do not use "
                                  "same name and interval combination more "
                                  "than once per resource)", ID(operation), id);
                 dup = TRUE;
             }
         }
     }
 
     return dup;
 }
 
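 /*!
  * \internal
  * \brief Check whether an action name is for an action that cannot recur
  *
  * \param[in] name  Action name to check
  *
  * \return true if \p name is stop, start, demote, or promote, otherwise false
  */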
 static bool
 op_cannot_recur(const char *name)
 {
     return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
 }
 
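 /*!
  * \internal
  * \brief Create a recurring monitor action for an active role, if needed
  *
  * \param[in] rsc        Resource that the monitor is for
  * \param[in] start      Start action of \p rsc, if any
  * \param[in] node       Node that the monitor should run on
  * \param[in] operation  Operation XML from the resource's configuration
  * \param[in] data_set   Cluster working set
  */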
 static void
 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
             xmlNode * operation, pe_working_set_t * data_set)
 {
     char *key = NULL;
     const char *name = NULL;
     const char *role = NULL;
     const char *interval_spec = NULL;
     const char *node_uname = node? node->details->uname : "n/a";
 
     guint interval_ms = 0;
     pe_action_t *mon = NULL;
     gboolean is_optional = TRUE;
     GList *possible_matches = NULL;
 
     CRM_ASSERT(rsc);
 
     /* Only process operations that do not have role="Stopped" */
     role = crm_element_value(operation, "role");
     if (role && text2role(role) == RSC_ROLE_STOPPED) {
         return;
     }
 
     interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
     interval_ms = crm_parse_interval_spec(interval_spec);
     if (interval_ms == 0) {
         return;
     }
 
     name = crm_element_value(operation, "name");
     if (is_op_dup(rsc, name, interval_ms)) {
         crm_trace("Not creating duplicate recurring action %s for %dms %s",
                   ID(operation), interval_ms, name);
         return;
     }
 
     if (op_cannot_recur(name)) {
         pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                          ID(operation), name);
         return;
     }
 
     key = pcmk__op_key(rsc->id, name, interval_ms);
     if (find_rsc_op_entry(rsc, key) == NULL) {
         crm_trace("Not creating recurring action %s for disabled resource %s",
                   ID(operation), rsc->id);
         free(key);
         return;
     }
 
     pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
 
     if (start != NULL) {
         pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
                      pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
                      start->uuid);
         is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
     } else {
         pe_rsc_trace(rsc, "Marking %s optional", key);
         is_optional = TRUE;
     }
 
     /* start a monitor for an already active resource */
     possible_matches = find_actions_exact(rsc->actions, key, node);
     if (possible_matches == NULL) {
         is_optional = FALSE;
         pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
 
     } else {
         GList *gIter = NULL;
 
         for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
             pe_action_t *op = (pe_action_t *) gIter->data;
 
             if (pcmk_is_set(op->flags, pe_action_reschedule)) {
                 is_optional = FALSE;
                 break;
             }
         }
         g_list_free(possible_matches);
     }
 
     if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
         || (role != NULL && text2role(role) != rsc->next_role)) {
         int log_level = LOG_TRACE;
         const char *result = "Ignoring";
 
         if (is_optional) {
             char *after_key = NULL;
             pe_action_t *cancel_op = NULL;
 
             // It's running, so cancel it
             log_level = LOG_INFO;
             result = "Cancelling";
             cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
 
             switch (rsc->role) {
                 case RSC_ROLE_UNPROMOTED:
                 case RSC_ROLE_STARTED:
                     if (rsc->next_role == RSC_ROLE_PROMOTED) {
                         after_key = promote_key(rsc);
 
                     } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                         after_key = stop_key(rsc);
                     }
 
                     break;
                 case RSC_ROLE_PROMOTED:
                     after_key = demote_key(rsc);
                     break;
                 default:
                     break;
             }
 
             if (after_key) {
                 pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
             }
         }
 
         do_crm_log(log_level, "%s action %s (%s vs. %s)",
                    result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
                    role2text(rsc->next_role));
 
         free(key);
         return;
     }
 
     mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
     key = mon->uuid;
     if (is_optional) {
         pe_rsc_trace(rsc, "%s\t   %s (optional)", node_uname, mon->uuid);
     }
 
     if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
         pe_rsc_debug(rsc, "%s\t   %s (cancelled : start un-runnable)",
                      node_uname, mon->uuid);
         pe__clear_action_flags(mon, pe_action_runnable);
 
     } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
         pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                      node_uname, mon->uuid);
         pe__clear_action_flags(mon, pe_action_runnable);
 
     } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
         pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                     mon->task, interval_ms / 1000, rsc->id, node_uname);
     }
 
     if (rsc->next_role == RSC_ROLE_PROMOTED) {
         char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
 
         add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
         free(running_promoted);
     }
 
     if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pcmk__new_ordering(rsc, start_key(rsc), NULL, NULL, strdup(key), mon,
                            pe_order_implies_then|pe_order_runnable_left,
                            data_set);
 
         pcmk__new_ordering(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon,
                            pe_order_implies_then|pe_order_runnable_left,
                            data_set);
 
         if (rsc->next_role == RSC_ROLE_PROMOTED) {
             pcmk__new_ordering(rsc, promote_key(rsc), NULL, rsc, NULL, mon,
                                pe_order_optional|pe_order_runnable_left,
                                data_set);
 
         } else if (rsc->role == RSC_ROLE_PROMOTED) {
             pcmk__new_ordering(rsc, demote_key(rsc), NULL, rsc, NULL, mon,
                                pe_order_optional|pe_order_runnable_left,
                                data_set);
         }
     }
 }
 
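 /*!
  * \internal
  * \brief Create recurring actions for a resource's non-Stopped roles
  *
  * \param[in] rsc       Resource to create recurring actions for
  * \param[in] start     Start action of \p rsc, if any
  * \param[in] node      Node that the actions should run on
  * \param[in] data_set  Cluster working set
  */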
 static void
 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
         (node == NULL || node->details->maintenance == FALSE)) {
         xmlNode *operation = NULL;
 
         for (operation = pcmk__xe_first_child(rsc->ops_xml);
              operation != NULL;
              operation = pcmk__xe_next(operation)) {
 
             if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                 RecurringOp(rsc, start, node, operation, data_set);
             }
         }
     }
 }
 
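 /*!
  * \internal
  * \brief Create a recurring monitor for the Stopped role, if needed
  *
  * A monitor with role="Stopped" is scheduled on every node where the resource
  * is not expected to be active, to detect it running where it should not be.
  *
  * \param[in] rsc        Resource that the monitor is for
  * \param[in] start      Start action of \p rsc, if any
  * \param[in] node       Node that \p rsc will be active on, if any
  * \param[in] operation  Operation XML from the resource's configuration
  * \param[in] data_set   Cluster working set
  */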
 static void
 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
                     xmlNode * operation, pe_working_set_t * data_set)
 {
     char *key = NULL;
     const char *name = NULL;
     const char *role = NULL;
     const char *interval_spec = NULL;
     const char *node_uname = node? node->details->uname : "n/a";
 
     guint interval_ms = 0;
     GList *possible_matches = NULL;
     GList *gIter = NULL;
 
     /* Only process operations with role="Stopped" */
     role = crm_element_value(operation, "role");
     if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
         return;
     }
 
     interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
     interval_ms = crm_parse_interval_spec(interval_spec);
     if (interval_ms == 0) {
         return;
     }
 
     name = crm_element_value(operation, "name");
     if (is_op_dup(rsc, name, interval_ms)) {
         crm_trace("Not creating duplicate recurring action %s for %dms %s",
                   ID(operation), interval_ms, name);
         return;
     }
 
     if (op_cannot_recur(name)) {
         pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                          ID(operation), name);
         return;
     }
 
     key = pcmk__op_key(rsc->id, name, interval_ms);
     if (find_rsc_op_entry(rsc, key) == NULL) {
         crm_trace("Not creating recurring action %s for disabled resource %s",
                   ID(operation), rsc->id);
         free(key);
         return;
     }
 
     // @TODO add support
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         crm_notice("Ignoring %s (recurring monitors for Stopped role are "
                    "not supported for anonymous clones)",
                    ID(operation));
         free(key);
         return;
     }
 
     pe_rsc_trace(rsc,
                  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
                  ID(operation), rsc->id, role2text(rsc->next_role));
 
     /* if the monitor exists on the node where the resource will be running, cancel it */
     if (node != NULL) {
         possible_matches = find_actions_exact(rsc->actions, key, node);
         if (possible_matches) {
             pe_action_t *cancel_op = NULL;
 
             g_list_free(possible_matches);
 
             cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
 
             if ((rsc->next_role == RSC_ROLE_STARTED)
                 || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
                 /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
                 /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
                 pcmk__new_ordering(rsc, NULL, cancel_op, rsc, start_key(rsc),
                                    NULL, pe_order_runnable_left, data_set);
             }
 
             pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
                         key, role, role2text(rsc->next_role), node_uname);
         }
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *stop_node = (pe_node_t *) gIter->data;
         const char *stop_node_uname = stop_node->details->uname;
         gboolean is_optional = TRUE;
         gboolean probe_is_optional = TRUE;
         gboolean stop_is_optional = TRUE;
         pe_action_t *stopped_mon = NULL;
         char *rc_inactive = NULL;
         GList *stop_ops = NULL;
         GList *local_gIter = NULL;
 
         if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
             continue;
         }
 
         pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
                      ID(operation), rsc->id,
                      pcmk__s(stop_node_uname, "unknown node"));
 
         /* start a monitor for an already stopped resource */
         possible_matches = find_actions_exact(rsc->actions, key, stop_node);
         if (possible_matches == NULL) {
             pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
                          pcmk__s(stop_node_uname, "unknown node"));
             is_optional = FALSE;
         } else {
             pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
                          pcmk__s(stop_node_uname, "unknown node"));
             is_optional = TRUE;
             g_list_free(possible_matches);
         }
 
         stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
 
         rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
         add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
         free(rc_inactive);
 
         if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
                                                  FALSE);
             GList *pIter = NULL;
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 pe_action_t *probe = (pe_action_t *) pIter->data;
 
                 order_actions(probe, stopped_mon, pe_order_runnable_left);
                 crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
             }
 
             g_list_free(probes);
         }
 
         stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
 
         for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
             pe_action_t *stop = (pe_action_t *) local_gIter->data;
 
             if (!pcmk_is_set(stop->flags, pe_action_optional)) {
                 stop_is_optional = FALSE;
             }
 
             if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
                 crm_debug("%s\t   %s (cancelled : stop un-runnable)",
                           pcmk__s(stop_node_uname, "<null>"),
                           stopped_mon->uuid);
                 pe__clear_action_flags(stopped_mon, pe_action_runnable);
             }
 
             if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                 pcmk__new_ordering(rsc, stop_key(rsc), stop, NULL, strdup(key),
                                    stopped_mon,
                                    pe_order_implies_then|pe_order_runnable_left,
                                    data_set);
             }
 
         }
 
         if (stop_ops) {
             g_list_free(stop_ops);
         }
 
         if (is_optional == FALSE && probe_is_optional && stop_is_optional
             && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
                          key, pcmk__s(stop_node_uname, "unknown node"));
             pe__set_action_flags(stopped_mon, pe_action_optional);
         }
 
         if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
             pe_rsc_trace(rsc, "%s\t   %s (optional)",
                          pcmk__s(stop_node_uname, "<null>"),
                          stopped_mon->uuid);
         }
 
         if (stop_node->details->online == FALSE || stop_node->details->unclean) {
             pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                          pcmk__s(stop_node_uname, "<null>"),
                          stopped_mon->uuid);
             pe__clear_action_flags(stopped_mon, pe_action_runnable);
         }
 
         if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
             && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
             crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
                        interval_ms / 1000, rsc->id,
                        pcmk__s(stop_node_uname, "unknown node"));
         }
     }
 
     free(key);
 }
 
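 /*!
  * \internal
  * \brief Create recurring actions for a resource's role="Stopped" operations
  *
  * \param[in] rsc       Resource to create recurring actions for
  * \param[in] start     Start action of \p rsc, if any
  * \param[in] node      Node that \p rsc will be active on, if any
  * \param[in] data_set  Cluster working set
  */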
 static void
 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
         (node == NULL || node->details->maintenance == FALSE)) {
         xmlNode *operation = NULL;
 
         for (operation = pcmk__xe_first_child(rsc->ops_xml);
              operation != NULL;
              operation = pcmk__xe_next(operation)) {
 
             if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                 RecurringOp_Stopped(rsc, start, node, operation, data_set);
             }
         }
     }
 }
 
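 /*!
  * \internal
  * \brief Schedule the actions needed to migrate a resource
  *
  * Create migrate_to and migrate_from actions (or only migrate_from, for a
  * partial migration), and order them relative to the resource's probe,
  * (pseudo-)start, and stop actions.
  *
  * \param[in] rsc       Resource being migrated
  * \param[in] current   Node that \p rsc is currently active on
  * \param[in] chosen    Node that \p rsc is being migrated to
  * \param[in] data_set  Cluster working set
  */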
 static void
 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
 {
     pe_action_t *migrate_to = NULL;
     pe_action_t *migrate_from = NULL;
     pe_action_t *start = NULL;
     pe_action_t *stop = NULL;
     gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
 
     pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
     rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
     start = start_action(rsc, chosen, TRUE);
     stop = stop_action(rsc, current, TRUE);
 
     if (partial == FALSE) {
         migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                    RSC_MIGRATE, current, TRUE, TRUE, data_set);
     }
 
     migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
 
     if ((migrate_to && migrate_from) || (migrate_from && partial)) {
 
         pe__set_action_flags(start, pe_action_migrate_runnable);
         pe__set_action_flags(stop, pe_action_migrate_runnable);
 
         // This is easier than trying to delete it from the graph
         pe__set_action_flags(start, pe_action_pseudo);
 
         /* order probes before migrations */
         if (partial) {
             pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
             migrate_from->needs = start->needs;
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL, pe_order_optional, data_set);
 
         } else {
             pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
             pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
             migrate_to->needs = start->needs;
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, pe_order_optional, data_set);
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL,
                                pe_order_optional|pe_order_implies_first_migratable,
                                data_set);
         }
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable,
                            data_set);
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
                            data_set);
     }
 
     if (migrate_to) {
         add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
         add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
 
         /* Pacemaker Remote connections don't require pending to be recorded in
          * the CIB. We can reduce CIB writes by not setting PENDING for them.
          */
         if (rsc->is_remote_node == FALSE) {
             /* migrate_to takes place on the source node, but can have an
              * effect on the target node depending on how the agent is
              * written. Because of this, we have to maintain a record that the
              * migrate_to occurred, in case the source node loses membership
              * while the migrate_to action is still in-flight.
              */
             add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
         }
     }
 
     if (migrate_from) {
         add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
         add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions to bring resource down and back to current role
  *
  * \param[in] rsc           Resource to restart
  * \param[in] current       Node that resource should be brought down on
  * \param[in] chosen        Node that resource should be brought up on
  * \param[in] need_stop     Whether the resource must be stopped
  * \param[in] need_promote  Whether the resource must be promoted
  */
 static void
 schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
                          pe_node_t *chosen, bool need_stop, bool need_promote)
 {
     enum rsc_role_e role = rsc->role;
     enum rsc_role_e next_role;
 
     pe__set_resource_flags(rsc, pe_rsc_restarting);
 
     // Bring resource down to a stop on its current node
     while (role != RSC_ROLE_STOPPED) {
         next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
         pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                      (need_stop? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         if (!rsc_action_matrix[role][next_role](rsc, current, !need_stop,
                                                 rsc->cluster)) {
             break;
         }
         role = next_role;
     }
 
     // Bring resource up to its next role on its next node
     while ((rsc->role <= rsc->next_role) && (role != rsc->role)
            && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
         bool required = need_stop;
 
         next_role = rsc_state_matrix[role][rsc->role];
         if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
             required = true;
         }
         pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                      (required? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         if (!rsc_action_matrix[role][next_role](rsc, chosen, !required,
                                                 rsc->cluster)) {
             break;
         }
         role = next_role;
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_restarting);
 }
 
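 /*!
  * \internal
  * \brief Create all actions needed for a given primitive resource
  *
  * Schedule the stop, start, demote, promote, migration, and recurring monitor
  * actions needed to take the resource from its current role and node(s) to
  * its next role on its chosen node.
  *
  * \param[in] rsc       Resource to create actions for
  * \param[in] data_set  Cluster working set
  */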
 void
 native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_action_t *start = NULL;
     pe_node_t *chosen = NULL;
     pe_node_t *current = NULL;
     gboolean need_stop = FALSE;
     bool need_promote = FALSE;
     gboolean is_moving = FALSE;
     gboolean allow_migrate = FALSE;
 
     GList *gIter = NULL;
     unsigned int num_all_active = 0;
     unsigned int num_clean_active = 0;
     bool multiply_active = FALSE;
     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
 
     CRM_ASSERT(rsc != NULL);
     allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
 
     chosen = rsc->allocated_to;
     next_role = rsc->next_role;
     if (next_role == RSC_ROLE_UNKNOWN) {
         pe__set_next_role(rsc,
                           (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
                           "allocation");
     }
     pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
                  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                  ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
                  ((chosen == NULL)? "no node" : chosen->details->uname));
 
     current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
 
     for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
         pe_node_t *dangling_source = (pe_node_t *) gIter->data;
 
         pe_action_t *stop = NULL;
 
         pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
                      pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
                      rsc->id, dangling_source->details->uname);
         stop = stop_action(rsc, dangling_source, FALSE);
         pe__set_action_flags(stop, pe_action_dangle);
         if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
             DeleteRsc(rsc, dangling_source, FALSE, data_set);
         }
     }
 
     if ((num_all_active == 2) && (num_clean_active == 2) && chosen
         && rsc->partial_migration_source && rsc->partial_migration_target
         && (current->details == rsc->partial_migration_source->details)
         && (chosen->details == rsc->partial_migration_target->details)) {
 
         /* The chosen node is still the migration target from a partial
          * migration. Attempt to continue the migration instead of recovering
          * by stopping the resource everywhere and starting it on a single node.
          */
         pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
                      "to target %s from %s",
                      rsc->partial_migration_target->details->id,
                      rsc->partial_migration_source->details->id);
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         /* If a resource has "requires" set to nothing or quorum, don't consider
          * it active on unclean nodes (similar to how all resources behave when
          * stonith-enabled is false). We can start such resources elsewhere
          * before fencing completes, and if we considered the resource active on
          * the failed node, we would attempt recovery for being active on
          * multiple nodes.
          */
         multiply_active = (num_clean_active > 1);
     } else {
         multiply_active = (num_all_active > 1);
     }
 
     if (multiply_active) {
         if (rsc->partial_migration_target && rsc->partial_migration_source) {
             // Migration was in progress, but we've chosen a different target
             crm_notice("Resource %s can no longer migrate from %s to %s "
                        "(will stop on both nodes)",
                        rsc->id, rsc->partial_migration_source->details->uname,
                        rsc->partial_migration_target->details->uname);
             multiply_active = false;
 
         } else {
             const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
             // Resource was (possibly) incorrectly multiply active
             pe_proc_err("%s resource %s might be active on %u nodes (%s)",
                         pcmk__s(class, "Untyped"), rsc->id, num_all_active,
                         recovery2text(rsc->recovery_type));
             crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
         }
 
         switch (rsc->recovery_type) {
             case recovery_stop_start:
                 need_stop = TRUE;
                 break;
             case recovery_stop_unexpected:
                 need_stop = TRUE; // StopRsc() will skip expected node
                 pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
                 break;
             default:
                 break;
         }
 
         /* If a partial migration was in progress but the chosen node is no
          * longer the migration target, clear all partial migration data.
          */
         rsc->partial_migration_source = rsc->partial_migration_target = NULL;
         allow_migrate = FALSE;
     }
 
     if (!multiply_active) {
         pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
         pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         pe__set_action_flags(start, pe_action_print_always);
     }
 
     if (current && chosen && current->details != chosen->details) {
         pe_rsc_trace(rsc, "Moving %s from %s to %s",
                      rsc->id, pcmk__s(current->details->uname, "unknown node"),
                      pcmk__s(chosen->details->uname, "unknown node"));
         is_moving = TRUE;
         need_stop = TRUE;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
             need_stop = TRUE;
             pe_rsc_trace(rsc, "Recovering %s", rsc->id);
         } else {
             pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
             if (rsc->next_role == RSC_ROLE_PROMOTED) {
                 need_promote = TRUE;
             }
         }
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
         need_stop = TRUE;
 
     } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
         pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         if (!pcmk_is_set(start->flags, pe_action_optional)) {
             // Recovery of a promoted resource
             pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
             need_stop = TRUE;
         }
     }
 
     /* Create any additional actions required when bringing resource down and
      * back up to same level.
      */
     schedule_restart_actions(rsc, current, chosen, need_stop, need_promote);
 
     /* Required steps from this role to the next */
     role = rsc->role;
     while (role != rsc->next_role) {
         next_role = rsc_state_matrix[role][rsc->next_role];
         pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
                      rsc->id, role2text(role), role2text(next_role),
                      role2text(rsc->next_role));
         if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
             break;
         }
         role = next_role;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
                      rsc->id);
 
     } else if ((rsc->next_role != RSC_ROLE_STOPPED)
                || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
                      ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         Recurring(rsc, start, chosen, data_set);
         Recurring_Stopped(rsc, start, chosen, data_set);
 
     } else {
         pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
                      rsc->id);
         Recurring_Stopped(rsc, NULL, NULL, data_set);
     }
 
     /* If we are stuck in a partial migration whose target no longer matches
      * the chosen node, a full stop/start is required.
      */
     if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
         pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
                      rsc->id);
         allow_migrate = FALSE;
 
     } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
                || pcmk_any_flags_set(rsc->flags,
                                      pe_rsc_failed|pe_rsc_start_pending)
                || (current && current->details->unclean)
                || rsc->next_role < RSC_ROLE_STARTED) {
 
         allow_migrate = FALSE;
     }
 
     if (allow_migrate) {
         handle_migration_actions(rsc, current, chosen, data_set);
     }
 }
 
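 /*!
  * \internal
  * \brief Ban a resource from all Pacemaker Remote nodes
  *
  * \param[in] rsc  Resource whose allowed node weights should be updated
  */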
 static void
 rsc_avoids_remote_nodes(pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         if (node->details->remote_rsc) {
             node->weight = -INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Return allowed nodes as (possibly sorted) list
  *
  * Convert a resource's hash table of allowed nodes to a list. If printing to
  * stdout, sort the list, to keep action ID numbers consistent for regression
  * test output (while avoiding the performance hit on a live cluster).
  *
  * \param[in] rsc       Resource to check for allowed nodes
  * \param[in] data_set  Cluster working set
  *
  * \return List of resource's allowed nodes
  * \note Callers should take care not to rely on the list being sorted.
  */
 static GList *
 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     GList *allowed_nodes = NULL;
 
     if (rsc->allowed_nodes) {
         allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
     }
 
     if (!pcmk__is_daemon) {
         allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
     }
 
     return allowed_nodes;
 }
 
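 /*!
  * \internal
  * \brief Create implicit constraints needed for a primitive resource
  *
  * Create implicit orderings (such as stop before start, and demote before
  * stop), as well as any constraints needed for unfencing, utilization,
  * containers, and Pacemaker Remote nodes.
  *
  * \param[in] rsc       Resource to create constraints for
  * \param[in] data_set  Cluster working set
  */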
 void
 native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     /* This function is on the critical path and worth optimizing as much as possible */
 
     pe_resource_t *top = NULL;
     GList *allowed_nodes = NULL;
     bool check_unfencing = FALSE;
     bool check_utilization = false;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc,
                      "Skipping native constraints for unmanaged resource: %s",
                      rsc->id);
         return;
     }
 
     top = uber_parent(rsc);
 
     // Whether resource requires unfencing
     check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
                       && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)
                       && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
 
     // Whether a non-default placement strategy is used
     check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                          && !pcmk__str_eq(data_set->placement_strategy,
                                           "default", pcmk__str_casei);
 
     // Order stops before starts (i.e. restart)
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                        pe_order_optional|pe_order_implies_then|pe_order_restart,
                        data_set);
 
     // Promotable ordering: demote before stop, start before promote
     if (pcmk_is_set(top->flags, pe_rsc_promotable)
         || (rsc->role > RSC_ROLE_UNPROMOTED)) {
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_promoted_implies_first, data_set);
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, data_set);
     }
 
     // Don't clear resource history if probing on same node
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
                        NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                        NULL, pe_order_same_node|pe_order_then_cancels_first,
                        data_set);
 
     // Certain checks need allowed nodes
     if (check_unfencing || check_utilization || rsc->container) {
         allowed_nodes = allowed_nodes_as_list(rsc, data_set);
     }
 
     if (check_unfencing) {
         /* Check if the node needs to be unfenced first */
 
         for (GList *item = allowed_nodes; item; item = item->next) {
             pe_node_t *node = item->data;
             pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
 
             crm_debug("Ordering any stops of %s before %s, and any starts after",
                       rsc->id, unfence->uuid);
 
             /*
              * It would be more efficient to order clone resources once,
              * rather than order each instance, but ordering the instance
              * allows us to avoid unnecessary dependencies that might conflict
              * with user constraints.
              *
              * @TODO: This constraint can still produce a transition loop if the
              * resource has a stop scheduled on the node being unfenced, and
              * there is a user ordering constraint to start some other resource
              * (which will be ordered after the unfence) before stopping this
              * resource. An example is "start some slow-starting cloned service
              * before stopping an associated virtual IP that may be moving to
              * it":
              *       stop this -> unfencing -> start that -> stop this
              */
             pcmk__new_ordering(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
                                pe_order_optional|pe_order_same_node, data_set);
 
             pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                pe_order_implies_then_on_node|pe_order_same_node,
                                data_set);
         }
     }
 
     if (check_utilization) {
         pcmk__create_utilization_constraints(rsc, allowed_nodes);
     }
 
     if (rsc->container) {
         pe_resource_t *remote_rsc = NULL;
 
         if (rsc->is_remote_node) {
             // rsc is the implicit remote connection for a guest or bundle node
 
             /* Do not allow a guest resource to live on a Pacemaker Remote node,
              * to avoid nesting remotes. However, allow bundles to run on remote
              * nodes.
              */
             if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 rsc_avoids_remote_nodes(rsc->container);
             }
 
             /* If someone cleans up a guest or bundle node's container, we will
              * likely schedule a (re-)probe of the container and recovery of the
              * connection. Order the connection stop after the container probe,
              * so that if we detect the container running, we will trigger a new
              * transition and avoid the unnecessary recovery.
              */
             pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
                                          RSC_STOP, pe_order_optional, data_set);
 
         /* A user can specify that a resource must start on a Pacemaker Remote
          * node by explicitly configuring it with the container=NODENAME
          * meta-attribute. This is of questionable merit, since location
          * constraints can accomplish the same thing. But we support it, so here
          * we check whether a resource (that is not itself a remote connection)
          * has container set to a remote node or guest node resource.
          */
         } else if (rsc->container->is_remote_node) {
             remote_rsc = rsc->container;
         } else  {
             remote_rsc = pe__resource_contains_guest_node(data_set,
                                                           rsc->container);
         }
 
         if (remote_rsc) {
             /* Force the resource on the Pacemaker Remote node instead of
              * colocating the resource with the container resource.
              */
             for (GList *item = allowed_nodes; item; item = item->next) {
                 pe_node_t *node = item->data;
 
                 if (node->details->remote_rsc != remote_rsc) {
                     node->weight = -INFINITY;
                 }
             }
 
         } else {
             /* This resource is either a filler for a container that does NOT
              * represent a Pacemaker Remote node, or a Pacemaker Remote
              * connection resource for a guest node or bundle.
              */
             int score;
 
             crm_trace("Order and colocate %s relative to its container %s",
                       rsc->id, rsc->container->id);
 
             pcmk__new_ordering(rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_START, 0),
                                NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                                NULL,
                                pe_order_implies_then|pe_order_runnable_left,
                                data_set);
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                                NULL, pe_order_implies_first, data_set);
 
             if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 score = 10000;    /* Highly preferred but not essential */
             } else {
                 score = INFINITY; /* Force them to run on the same host */
             }
             pcmk__new_colocation("resource-with-container", NULL, score, rsc,
                                  rsc->container, NULL, NULL, true, data_set);
         }
     }
 
     if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
         /* Don't allow remote nodes to run stonith devices or remote
          * connection resources.
          */
         rsc_avoids_remote_nodes(rsc);
     }
     g_list_free(allowed_nodes);
 }
 
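 /*!
  * \internal
  * \brief Apply a colocation constraint, from the dependent's point of view
  *
  * After validating the constraint, delegate to the primary resource's method
  * for applying it.
  *
  * \param[in] dependent   Dependent resource in colocation
  * \param[in] primary     Primary resource in colocation
  * \param[in] constraint  Colocation constraint to apply
  * \param[in] data_set    Cluster working set
  */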
 void
 native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
                          pcmk__colocation_t *constraint,
                          pe_working_set_t *data_set)
 {
     if (dependent == NULL) {
         pe_err("dependent was NULL for %s", constraint->id);
         return;
 
     } else if (constraint->primary == NULL) {
         pe_err("primary was NULL for %s", constraint->id);
         return;
     }
 
     pe_rsc_trace(dependent,
                  "Processing colocation constraint between %s and %s",
                  dependent->id, primary->id);
 
     primary->cmds->rsc_colocation_rh(dependent, primary, constraint, data_set);
 }
 
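 /*!
  * \internal
  * \brief Apply a colocation constraint to a primitive resource
  *
  * Depending on what the constraint affects, update either the dependent's
  * priority or its allowed node weights.
  *
  * \param[in] dependent   Dependent resource in colocation
  * \param[in] primary     Primary resource in colocation
  * \param[in] constraint  Colocation constraint to apply
  * \param[in] data_set    Cluster working set
  */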
 void
 native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
                          pcmk__colocation_t *constraint,
                          pe_working_set_t *data_set)
 {
     enum pcmk__coloc_affects filter_results;
 
     CRM_ASSERT((dependent != NULL) && (primary != NULL));
     filter_results = pcmk__colocation_affects(dependent, primary, constraint,
                                               false);
     pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                  ((constraint->score > 0)? "Colocating" : "Anti-colocating"),
                  dependent->id, primary->id, constraint->id, constraint->score,
                  filter_results);
 
     switch (filter_results) {
         case pcmk__coloc_affects_role:
             pcmk__apply_coloc_to_priority(dependent, primary, constraint);
             break;
         case pcmk__coloc_affects_location:
             pcmk__apply_coloc_to_weights(dependent, primary, constraint);
             break;
         case pcmk__coloc_affects_nothing:
         default:
             return;
     }
 }
 
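 /*!
  * \internal
  * \brief Return a primitive action's flags
  *
  * \param[in] action  Action to check
  * \param[in] node    Ignored (primitive flags do not vary by node)
  *
  * \return Flags of \p action
  */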
 enum pe_action_flags
 native_action_flags(pe_action_t * action, pe_node_t * node)
 {
     return action->flags;
 }
 
 static inline bool
 is_primitive_action(pe_action_t *action)
 {
     return action && action->rsc && (action->rsc->variant == pe_native);
 }
 
 /*!
  * \internal
  * \brief Clear a single action flag and set reason text
  *
  * \param[in] action  Action whose flag should be cleared
  * \param[in] flag    Action flag that should be cleared
  * \param[in] reason  Action that is the reason why flag is being cleared
  */
 #define clear_action_flag_because(action, flag, reason) do {                \
         if (pcmk_is_set((action)->flags, (flag))) {                         \
             pe__clear_action_flags(action, flag);                           \
             if ((action)->rsc != (reason)->rsc) {                           \
                 char *reason_text = pe__action2reason((reason), (flag));    \
                 pe_action_set_reason((action), reason_text,                 \
                                    ((flag) == pe_action_migrate_runnable)); \
                 free(reason_text);                                          \
             }                                                               \
         }                                                                   \
     } while (0)
 
 /*!
  * \internal
  * \brief Set action bits appropriately when pe_restart_order is used
  *
  * \param[in] first   'First' action in an ordering with pe_restart_order
  * \param[in] then    'Then' action in an ordering with pe_restart_order
  * \param[in] filter  What ordering flags to care about
  *
  * \note pe_restart_order is set for "stop resource before starting it" and
  *       "stop later group member before stopping earlier group member"
  */
 static void
 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
                         enum pe_action_flags filter)
 {
     const char *reason = NULL;
 
     CRM_ASSERT(is_primitive_action(first));
     CRM_ASSERT(is_primitive_action(then));
 
     // We need to update the action in two cases:
 
     // ... if 'then' is required
     if (pcmk_is_set(filter, pe_action_optional)
         && !pcmk_is_set(then->flags, pe_action_optional)) {
         reason = "restart";
     }
 
     /* ... if 'then' is unrunnable action on same resource (if a resource
      * should restart but can't start, we still want to stop)
      */
     if (pcmk_is_set(filter, pe_action_runnable)
         && !pcmk_is_set(then->flags, pe_action_runnable)
         && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
         && (first->rsc == then->rsc)) {
         reason = "stop";
     }
 
     if (reason == NULL) {
         return;
     }
 
     pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
                  first->uuid, then->uuid, reason);
 
     // Make 'first' required if it is runnable
     if (pcmk_is_set(first->flags, pe_action_runnable)) {
         clear_action_flag_because(first, pe_action_optional, then);
     }
 
     // Make 'first' required if 'then' is required
     if (!pcmk_is_set(then->flags, pe_action_optional)) {
         clear_action_flag_because(first, pe_action_optional, then);
     }
 
     // Make 'first' unmigratable if 'then' is unmigratable
     if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
         clear_action_flag_because(first, pe_action_migrate_runnable, then);
     }
 
     // Make 'then' unrunnable if 'first' is required but unrunnable
     if (!pcmk_is_set(first->flags, pe_action_optional)
         && !pcmk_is_set(first->flags, pe_action_runnable)) {
         clear_action_flag_because(then, pe_action_runnable, first);
     }
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * \param[in] first     'First' action in the ordering
  * \param[in] then      'Then' action in the ordering
  * \param[in] node      If not NULL, limit scope of ordering to this node
  * \param[in] flags     Flags from action_flags_for_ordering() for 'first'
  * \param[in] filter    Flags to limit update to
  * \param[in] type      Group of enum pe_ordering flags to apply
  * \param[in] data_set  Cluster working set
  *
  * \return Group of enum pe_graph_flags indicating what was updated
  */
 enum pe_graph_flags
 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
                       enum pe_action_flags flags, enum pe_action_flags filter,
                       enum pe_ordering type, pe_working_set_t *data_set)
 {
     enum pe_graph_flags changed = pe_graph_none;
     enum pe_action_flags then_flags = then->flags;
     enum pe_action_flags first_flags = first->flags;
 
     if (type & pe_order_asymmetrical) {
         pe_resource_t *then_rsc = then->rsc;
         enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
 
         if (!then_rsc) {
             /* ignore */
          } else if ((then_rsc_role == RSC_ROLE_STOPPED)
                     && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
              /* Ignore. If 'then' is supposed to be stopped after 'first', but
               * 'then' is already stopped, there is nothing to be done when
               * asymmetrical.
               */
         } else if ((then_rsc_role >= RSC_ROLE_STARTED)
                    && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
                    && pcmk_is_set(then->flags, pe_action_optional)
                    && then->node
                    && pcmk__list_of_1(then_rsc->running_on)
                    && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
             /* Ignore. If 'then' is supposed to be started after 'first', but
              * 'then' is already started, there is nothing to be done when
              * asymmetrical -- unless the start is mandatory, which indicates
              * the resource is restarting, and the ordering is still needed.
              */
         } else if (!(first->flags & pe_action_runnable)) {
             /* prevent 'then' action from happening if 'first' is not runnable and
              * 'then' has not yet occurred. */
             clear_action_flag_because(then, pe_action_optional, first);
             clear_action_flag_because(then, pe_action_runnable, first);
         } else {
             /* ignore... then is allowed to start/stop if it wants to. */
         }
     }
 
     if (pcmk_is_set(type, pe_order_implies_first)
         && !pcmk_is_set(then_flags, pe_action_optional)) {
         // Then is required, and implies first should be, too
 
         if (pcmk_is_set(filter, pe_action_optional)
             && !pcmk_is_set(flags, pe_action_optional)
             && pcmk_is_set(first_flags, pe_action_optional)) {
             clear_action_flag_because(first, pe_action_optional, then);
         }
 
         if (pcmk_is_set(flags, pe_action_migrate_runnable) &&
             !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
             clear_action_flag_because(first, pe_action_migrate_runnable, then);
         }
     }
 
     if (type & pe_order_promoted_implies_first) {
         if ((filter & pe_action_optional) &&
             ((then->flags & pe_action_optional) == FALSE) &&
             (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
 
             clear_action_flag_because(first, pe_action_optional, then);
 
             if (pcmk_is_set(first->flags, pe_action_migrate_runnable) &&
                 !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
                 clear_action_flag_because(first, pe_action_migrate_runnable,
                                           then);
             }
         }
     }
 
     if ((type & pe_order_implies_first_migratable)
         && pcmk_is_set(filter, pe_action_optional)) {
 
         if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
             ((then->flags & pe_action_runnable) == FALSE)) {
             clear_action_flag_because(first, pe_action_runnable, then);
         }
 
         if ((then->flags & pe_action_optional) == 0) {
             clear_action_flag_because(first, pe_action_optional, then);
         }
     }
 
     if ((type & pe_order_pseudo_left)
         && pcmk_is_set(filter, pe_action_optional)) {
 
         if ((first->flags & pe_action_runnable) == FALSE) {
             clear_action_flag_because(then, pe_action_migrate_runnable, first);
             pe__clear_action_flags(then, pe_action_pseudo);
         }
     }
 
     if (pcmk_is_set(type, pe_order_runnable_left)
         && pcmk_is_set(filter, pe_action_runnable)
         && pcmk_is_set(then->flags, pe_action_runnable)
         && !pcmk_is_set(flags, pe_action_runnable)) {
 
         clear_action_flag_because(then, pe_action_runnable, first);
         clear_action_flag_because(then, pe_action_migrate_runnable, first);
     }
 
     if (pcmk_is_set(type, pe_order_implies_then)
         && pcmk_is_set(filter, pe_action_optional)
         && pcmk_is_set(then->flags, pe_action_optional)
         && !pcmk_is_set(flags, pe_action_optional)
         && !pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
 
         clear_action_flag_because(then, pe_action_optional, first);
     }
 
     if (pcmk_is_set(type, pe_order_restart)) {
         handle_restart_ordering(first, then, filter);
     }
 
     if (then_flags != then->flags) {
         pe__set_graph_flags(changed, first, pe_graph_updated_then);
         pe_rsc_trace(then->rsc,
                      "%s on %s: flags are now %#.6x (was %#.6x) "
                      "because of 'first' %s (%#.6x)",
                      then->uuid,
                      then->node? then->node->details->uname : "no node",
                      then->flags, then_flags, first->uuid, first->flags);
 
         if(then->rsc && then->rsc->parent) {
             /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
             pcmk__update_action_for_orderings(then, data_set);
         }
     }
 
     if (first_flags != first->flags) {
         pe__set_graph_flags(changed, first, pe_graph_updated_first);
         pe_rsc_trace(first->rsc,
                      "%s on %s: flags are now %#.6x (was %#.6x) "
                      "because of 'then' %s (%#.6x)",
                      first->uuid,
                      first->node? first->node->details->uname : "no node",
                      first->flags, first_flags, then->uuid, then->flags);
     }
 
     return changed;
 }
 
 void
 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pcmk__apply_location(constraint, rsc);
 }
 
 void
 native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
 
     for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
         pcmk__add_action_to_graph(action, data_set);
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->expand(child_rsc, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a multiply active resource's expected node
  *
  * \param[in] rsc  Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p rsc is multiply active with multiple-active set to
  *         stop_unexpected, and \p node is the node where it will remain active
  * \note This assumes that the resource's next role cannot be changed to stopped
  *       after this is called, which should be reasonable if status has already
  *       been unpacked and resources have been assigned to nodes.
  */
 static bool
 is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
 {
     return pcmk_all_flags_set(rsc->flags,
                               pe_rsc_stop_unexpected|pe_rsc_restarting)
            && (rsc->next_role > RSC_ROLE_STOPPED)
            && (rsc->allocated_to != NULL) && (node != NULL)
            && (rsc->allocated_to->details == node->details);
 }
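
/* Illustrative sketch (not part of this patch): the scheduling code below uses
 * is_expected_node() to decide whether restart actions on a node should become
 * no-ops. A hypothetical trace-only walk over a resource's active nodes:
 */
static void
trace_expected_nodes(const pe_resource_t *rsc)
{
    for (const GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
        const pe_node_t *current = (const pe_node_t *) iter->data;

        if (is_expected_node(rsc, current)) {
            crm_trace("%s is expected to remain active on %s",
                      rsc->id, current->details->uname);
        }
    }
}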
 
 gboolean
 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(rsc);
 
     for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *current = (pe_node_t *) gIter->data;
         pe_action_t *stop;
 
         if (is_expected_node(rsc, current)) {
             /* We are scheduling restart actions for a multiply active resource
              * with multiple-active=stop_unexpected, and this is where it should
              * not be stopped.
              */
             pe_rsc_trace(rsc,
                          "Skipping stop of multiply active resource %s "
                          "on expected node %s",
                          rsc->id, current->details->uname);
             continue;
         }
 
         if (rsc->partial_migration_target) {
             if (rsc->partial_migration_target->details == current->details
                // Only if the allocated node is still the migration target.
                 && rsc->allocated_to
                 && rsc->allocated_to->details == rsc->partial_migration_target->details) {
                 pe_rsc_trace(rsc,
                              "Skipping stop of %s on %s "
                              "because migration to %s in progress",
                              rsc->id, current->details->uname,
                              next->details->uname);
                 continue;
             } else {
                 pe_rsc_trace(rsc,
                              "Forcing stop of %s on %s "
                              "because migration target changed",
                              rsc->id, current->details->uname);
                 optional = FALSE;
             }
         }
 
         pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
                      rsc->id, current->details->uname);
         stop = stop_action(rsc, current, optional);
 
         if(rsc->allocated_to == NULL) {
             pe_action_set_reason(stop, "node availability", TRUE);
         } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
                                                   |pe_rsc_stop_unexpected)) {
             /* We are stopping a multiply active resource on a node that is
              * not its expected node, and we are still scheduling restart
              * actions, so the stop is for being multiply active.
              */
             pe_action_set_reason(stop, "being multiply active", TRUE);
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe__clear_action_flags(stop, pe_action_runnable);
         }
 
         if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
             DeleteRsc(rsc, current, optional, data_set);
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
             pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
 
             order_actions(stop, unfence, pe_order_implies_first);
             if (!pcmk__node_unfenced(current)) {
                 pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
             }
         }
     }
 
     return TRUE;
 }
 
 gboolean
 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     pe_action_t *start = NULL;
 
     CRM_ASSERT(rsc);
 
     pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (weight=%d)",
                  (optional? "optional" : "required"), rsc->id,
                  ((next == NULL)? "N/A" : next->details->uname),
                  ((next == NULL)? 0 : next->weight));
     start = start_action(rsc, next, TRUE);
 
     pcmk__order_vs_unfence(rsc, next, start, pe_order_implies_then, data_set);
 
     if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
         pe__clear_action_flags(start, pe_action_optional);
     }
 
     if (is_expected_node(rsc, next)) {
         /* This could be a problem if the start becomes necessary for other
          * reasons later.
          */
         pe_rsc_trace(rsc,
                      "Start of multiply active resouce %s "
                      "on expected node %s will be a pseudo-action",
                      rsc->id, next->details->uname);
         pe__set_action_flags(start, pe_action_pseudo);
     }
 
     return TRUE;
 }
 
 gboolean
 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
     gboolean runnable = TRUE;
     GList *action_list = NULL;
 
     CRM_ASSERT(rsc);
     CRM_CHECK(next != NULL, return FALSE);
 
     pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
 
     action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *start = (pe_action_t *) gIter->data;
 
         if (!pcmk_is_set(start->flags, pe_action_runnable)) {
             runnable = FALSE;
         }
     }
     g_list_free(action_list);
 
     if (runnable) {
         pe_action_t *promote = promote_action(rsc, next, optional);
 
         if (is_expected_node(rsc, next)) {
             /* This could be a problem if the promote becomes necessary for
              * other reasons later.
              */
             pe_rsc_trace(rsc,
                          "Promotion of multiply active resouce %s "
                          "on expected node %s will be a pseudo-action",
                          rsc->id, next->details->uname);
             pe__set_action_flags(promote, pe_action_pseudo);
         }
 
         return TRUE;
     }
 
     pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
 
     action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *promote = (pe_action_t *) gIter->data;
 
         pe__clear_action_flags(promote, pe_action_runnable);
     }
 
     g_list_free(action_list);
     return TRUE;
 }
 
 gboolean
 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(rsc);
 
     if (is_expected_node(rsc, next)) {
         pe_rsc_trace(rsc,
                      "Skipping demote of multiply active resource %s "
                      "on expected node %s",
                      rsc->id, next->details->uname);
         return TRUE;
     }
 
     pe_rsc_trace(rsc, "%s", rsc->id);
 
     /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
     for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *current = (pe_node_t *) gIter->data;
 
         pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
         demote_action(rsc, current, optional);
     }
     return TRUE;
 }
 
 gboolean
 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     CRM_ASSERT(rsc);
     crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
     CRM_CHECK(FALSE, return FALSE);
     return FALSE;
 }
 
 gboolean
 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 {
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "%s", rsc->id);
     return FALSE;
 }
 
 gboolean
 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
         return FALSE;
 
     } else if (node == NULL) {
         pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
         return FALSE;
 
     } else if (node->details->unclean || node->details->online == FALSE) {
         pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
                      node->details->uname);
         return FALSE;
     }
 
     crm_notice("Removing %s from %s", rsc->id, node->details->uname);
 
     delete_action(rsc, node, optional);
 
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE,
                                  optional? pe_order_implies_then : pe_order_optional,
                                  data_set);
 
     pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START,
                                  optional? pe_order_implies_then : pe_order_optional,
                                  data_set);
 
     return TRUE;
 }
 
 gboolean
 native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
                     gboolean force, pe_working_set_t * data_set)
 {
     enum pe_ordering flags = pe_order_optional;
     char *key = NULL;
     pe_action_t *probe = NULL;
     pe_node_t *running = NULL;
     pe_node_t *allowed = NULL;
     pe_resource_t *top = uber_parent(rsc);
 
     static const char *rc_promoted = NULL;
     static const char *rc_inactive = NULL;
 
     if (rc_inactive == NULL) {
         rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
         rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
     }
 
     CRM_CHECK(node != NULL, return FALSE);
     if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
         return FALSE;
     }
 
     if (pe__is_guest_or_remote_node(node)) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
         if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
             pe_rsc_trace(rsc,
                          "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
                          rsc->id, node->details->id);
             return FALSE;
         } else if (pe__is_guest_node(node)
                    && pe__resource_contains_guest_node(data_set, rsc)) {
             pe_rsc_trace(rsc,
                          "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
                          rsc->id, node->details->id);
             return FALSE;
         } else if (rsc->is_remote_node) {
             pe_rsc_trace(rsc,
                          "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
                          rsc->id, node->details->id);
             return FALSE;
         }
     }
 
     if (rsc->children) {
         GList *gIter = NULL;
         gboolean any_created = FALSE;
 
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
                 || any_created;
         }
 
         return any_created;
 
     } else if ((rsc->container) && (!rsc->is_remote_node)) {
         pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
         return FALSE;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
         return FALSE;
     }
 
     // Check whether resource is already known on node
     if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
         pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
         return FALSE;
     }
 
     allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
     if (rsc->exclusive_discover || top->exclusive_discover) {
         if (allowed == NULL) {
            /* Exclusive discovery is enabled, and this node is not in the allowed list. */
             pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
             return FALSE;
         } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
            /* Exclusive discovery is enabled, but this node is not marked
             * as one this resource should be discovered on.
             */
             pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
             return FALSE;
         }
     }
 
     if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
        /* If this node were allowed to host this resource, it would have been
         * explicitly added to the 'allowed_nodes' list. It wasn't, and the
         * node has discovery disabled, so there is no need to probe for this
         * resource here.
         */
         pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
         return FALSE;
     }
 
     if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
        /* This resource is marked as not needing to be discovered on this node. */
         pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
         return FALSE;
     }
 
     if (pe__is_guest_node(node)) {
         pe_resource_t *remote = node->details->remote_rsc->container;
 
         if(remote->role == RSC_ROLE_STOPPED) {
             /* If the container is stopped, then we know anything that
              * might have been inside it is also stopped and there is
              * no need to probe.
              *
              * If we don't know the container's state on the target
              * either:
              *
              * - the container is running, the transition will abort
              *   and we'll end up in a different case next time, or
              *
              * - the container is stopped
              *
              * Either way there is no need to probe.
             */
             if(remote->allocated_to
                && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
                 /* For safety, we order the 'rsc' start after 'remote'
                  * has been probed.
                  *
                  * Using 'top' helps for groups, but we may need to
                  * follow the start's ordering chain backwards.
                  */
                 pcmk__new_ordering(remote,
                                    pcmk__op_key(remote->id, RSC_STATUS, 0),
                                    NULL, top,
                                    pcmk__op_key(top->id, RSC_START, 0), NULL,
                                    pe_order_optional, data_set);
             }
             pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
                          rsc->id, node->details->id, remote->id);
             return FALSE;
 
            /* Here we really want to check whether remote->stop is required,
              * but that information doesn't exist yet
              */
         } else if(node->details->remote_requires_reset
                   || node->details->unclean
                   || pcmk_is_set(remote->flags, pe_rsc_failed)
                   || remote->next_role == RSC_ROLE_STOPPED
                   || (remote->allocated_to
                       && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
             ) {
             /* The container is stopping or restarting, don't start
              * 'rsc' until 'remote' stops as this also implies that
              * 'rsc' is stopped - avoiding the need to probe
              */
             pcmk__new_ordering(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
                                NULL, top, pcmk__op_key(top->id, RSC_START, 0),
                                NULL, pe_order_optional, data_set);
         pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
                      rsc->id, node->details->id, remote->id);
             return FALSE;
 /*      } else {
  *            The container is running so there is no problem probing it
  */
         }
     }
 
     key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
     probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
     pe__clear_action_flags(probe, pe_action_optional);
 
     pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional, data_set);
 
     /*
      * We need to know if it's running_on (not just known_on) this node
      * to correctly determine the target rc.
      */
     running = pe_find_node_id(rsc->running_on, node->details->id);
     if (running == NULL) {
         add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
 
     } else if (rsc->role == RSC_ROLE_PROMOTED) {
         add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
     }
 
     crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
               pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on);
 
-    if (pcmk__is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
+    if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device)
+         && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing))
+        || !pe_rsc_is_clone(top)) {
         top = rsc;
     } else {
         crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
     }
 
     if (!pcmk_is_set(probe->flags, pe_action_runnable)
         && (rsc->running_on == NULL)) {
         /* Prevent the start from occurring if rsc isn't active, but
          * don't cause it to stop if it was active already
          */
         pe__set_order_flags(flags, pe_order_runnable_left);
     }
 
     pcmk__new_ordering(rsc, NULL, probe, top,
                        pcmk__op_key(top->id, RSC_START, 0), NULL, flags,
                        data_set);
 
     // Order the probe before any agent reload
     pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
                        pe_order_optional, data_set);
 
     return TRUE;
 }
 
 void
 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
     char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
     pe_resource_t *parent;
 
     if (value) {
         char *name = NULL;
 
         name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
     if (value) {
         char *name = NULL;
 
         name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     for (parent = rsc; parent != NULL; parent = parent->parent) {
         if (parent->container) {
             crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
         }
     }
 }
 
 // Primitive implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__primitive_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                 GList *all_rscs, GHashTable *utilization)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     pcmk__release_node_capacity(utilization, rsc);
 }
 
 /*!
  * \internal
  * \brief Get epoch time of node's shutdown attribute (or now if none)
  *
  * \param[in] node      Node to check
  * \param[in] data_set  Cluster working set
  *
  * \return Epoch time corresponding to shutdown attribute if set or now if not
  */
 static time_t
 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
     time_t result = 0;
 
     if (shutdown != NULL) {
         long long result_ll;
 
         if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
             result = (time_t) result_ll;
         }
     }
     return (result == 0)? get_effective_time(data_set) : result;
 }
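
/* Illustrative sketch (not part of this patch): the shutdown attribute records
 * when shutdown was requested, so a lock's expiration can be derived by adding
 * the cluster-wide shutdown-lock limit. The helper name is hypothetical.
 */
static time_t
example_lock_expiration(pe_node_t *node, pe_working_set_t *data_set)
{
    // A limit of 0 means "no expiration", as in pcmk__primitive_shutdown_lock()
    if (data_set->shutdown_lock > 0) {
        return shutdown_time(node, data_set) + data_set->shutdown_lock;
    }
    return 0;
}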
 
 // Primitive implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
     // Fence devices and remote connections can't be locked
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
         return;
     }
 
     if (rsc->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->running_on != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pe_rsc_info(rsc,
                         "Cancelling shutdown lock because %s is already active",
                         rsc->id);
             pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
             rsc->lock_node = NULL;
             rsc->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *node = rsc->running_on->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                              rsc->id, node->details->uname);
             } else {
                 rsc->lock_node = node;
                 rsc->lock_time = shutdown_time(node, rsc->cluster);
             }
         }
     }
 
     if (rsc->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (rsc->cluster->shutdown_lock > 0) {
         time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
 
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                     rsc->id, rsc->lock_node->details->uname,
                     (long long) lock_expiration);
         pe__update_recheck_time(++lock_expiration, rsc->cluster);
     } else {
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                     rsc->id, rsc->lock_node->details->uname);
     }
 
     // If resource is locked to one node, ban it from all other nodes
     for (GList *item = rsc->cluster->nodes; item != NULL; item = item->next) {
         pe_node_t *node = item->data;
 
         if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
             resource_location(rsc, node, -CRM_SCORE_INFINITY,
                               XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index 1880327244..ceafaa419e 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -1,315 +1,340 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <crm/lrmd.h>       // lrmd_event_data_t
 #include <crm/common/xml_internal.h>
 #include <pacemaker-internal.h>
 #include <pacemaker.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Check whether a node is available to run resources
  *
  * \param[in] node  Node to check
  *
  * \return true if node is online and not shutting down, unclean, or in standby
  *         or maintenance mode, otherwise false
  */
 bool
 pcmk__node_available(const pe_node_t *node)
 {
     // @TODO Should we add (node->weight >= 0)?
     return (node != NULL) && (node->details != NULL) && node->details->online
             && !node->details->shutdown && !node->details->unclean
             && !node->details->standby && !node->details->maintenance;
 }
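
/* Illustrative sketch (not part of this patch): pcmk__node_available() is a
 * plain predicate, so callers can filter or count nodes with it directly. The
 * helper name is hypothetical.
 */
static int
count_available_nodes(const pe_working_set_t *data_set)
{
    int available = 0;

    for (const GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        if (pcmk__node_available((const pe_node_t *) iter->data)) {
            ++available;
        }
    }
    return available;
}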
 
 /*!
  * \internal
  * \brief Copy a hash table of node objects
  *
  * \param[in] nodes  Hash table to copy
  *
  * \return New copy of nodes (or NULL if nodes is NULL)
  */
 GHashTable *
 pcmk__copy_node_table(GHashTable *nodes)
 {
     GHashTable *new_table = NULL;
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (nodes == NULL) {
         return NULL;
     }
     new_table = pcmk__strkey_table(NULL, free);
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         pe_node_t *new_node = pe__copy_node(node);
 
         g_hash_table_insert(new_table, (gpointer) new_node->details->id,
                             new_node);
     }
     return new_table;
 }
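
/* Illustrative sketch (not part of this patch): the copy created above owns
 * its values (the table is built with free() as its value-destroy function),
 * so one g_hash_table_destroy() releases everything. Names are hypothetical.
 */
static void
with_scratch_node_table(GHashTable *nodes)
{
    GHashTable *scratch = pcmk__copy_node_table(nodes);

    if (scratch != NULL) {
        // ... adjust weights in the scratch copy, leaving the original intact ...
        g_hash_table_destroy(scratch);
    }
}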
 
 /*!
  * \internal
  * \brief Copy a list of node objects
  *
  * \param[in] list   List to copy
  * \param[in] reset  Set copies' scores to 0
  *
  * \return New list of shallow copies of nodes in original list
  */
 GList *
 pcmk__copy_node_list(const GList *list, bool reset)
 {
     GList *result = NULL;
 
     for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pe_node_t *new_node = NULL;
         pe_node_t *this_node = (pe_node_t *) gIter->data;
 
         new_node = pe__copy_node(this_node);
         if (reset) {
             new_node->weight = 0;
         }
         result = g_list_prepend(result, new_node);
     }
     return result;
 }
 
 struct node_weight_s {
     pe_node_t *active;
     pe_working_set_t *data_set;
 };
 
 /*!
  * \internal
  * \brief Compare two nodes for allocation desirability
  *
  * Given two nodes, check which one is more preferred by allocation criteria
  * such as node weight and utilization.
  *
  * \param[in] a     First node to compare
  * \param[in] b     Second node to compare
  * \param[in] data  Sort data (as struct node_weight_s *)
  *
  * \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
  *         equally preferred
  */
 static gint
 compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
 {
     const pe_node_t *node1 = (const pe_node_t *) a;
     const pe_node_t *node2 = (const pe_node_t *) b;
     struct node_weight_s *nw = data;
 
     int node1_weight = 0;
     int node2_weight = 0;
 
     int result = 0;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     // Compare node weights
 
     node1_weight = pcmk__node_available(node1)? node1->weight : -INFINITY;
     node2_weight = pcmk__node_available(node2)? node2->weight : -INFINITY;
 
     if (node1_weight > node2_weight) {
         crm_trace("%s (%d) > %s (%d) : weight",
                   node1->details->uname, node1_weight, node2->details->uname,
                   node2_weight);
         return -1;
     }
 
     if (node1_weight < node2_weight) {
         crm_trace("%s (%d) < %s (%d) : weight",
                   node1->details->uname, node1_weight, node2->details->uname,
                   node2_weight);
         return 1;
     }
 
     crm_trace("%s (%d) == %s (%d) : weight",
               node1->details->uname, node1_weight, node2->details->uname,
               node2_weight);
 
     // If appropriate, compare node utilization
 
     if (pcmk__str_eq(nw->data_set->placement_strategy, "minimal",
                      pcmk__str_casei)) {
         goto equal;
     }
 
     if (pcmk__str_eq(nw->data_set->placement_strategy, "balanced",
                      pcmk__str_casei)) {
         result = pcmk__compare_node_capacities(node1, node2);
         if (result < 0) {
             crm_trace("%s > %s : capacity (%d)",
                       node1->details->uname, node2->details->uname, result);
             return -1;
         } else if (result > 0) {
             crm_trace("%s < %s : capacity (%d)",
                       node1->details->uname, node2->details->uname, result);
             return 1;
         }
     }
 
     // Compare number of allocated resources
 
     if (node1->details->num_resources < node2->details->num_resources) {
         crm_trace("%s (%d) > %s (%d) : resources",
                   node1->details->uname, node1->details->num_resources,
                   node2->details->uname, node2->details->num_resources);
         return -1;
 
     } else if (node1->details->num_resources > node2->details->num_resources) {
         crm_trace("%s (%d) < %s (%d) : resources",
                   node1->details->uname, node1->details->num_resources,
                   node2->details->uname, node2->details->num_resources);
         return 1;
     }
 
     // Check whether one node is already running desired resource
 
     if (nw->active != NULL) {
         if (nw->active->details == node1->details) {
             crm_trace("%s (%d) > %s (%d) : active",
                       node1->details->uname, node1->details->num_resources,
                       node2->details->uname, node2->details->num_resources);
             return -1;
         } else if (nw->active->details == node2->details) {
             crm_trace("%s (%d) < %s (%d) : active",
                       node1->details->uname, node1->details->num_resources,
                       node2->details->uname, node2->details->num_resources);
             return 1;
         }
     }
 
     // If all else is equal, prefer node with lowest-sorting name
 equal:
     crm_trace("%s = %s", node1->details->uname, node2->details->uname);
     return strcmp(node1->details->uname, node2->details->uname);
 }
 
 /*!
  * \internal
  * \brief Sort a list of nodes by allocation desirability
  *
  * \param[in] nodes        Node list to sort
  * \param[in] active_node  If not NULL, node currently running resource
  * \param[in] data_set     Cluster working set
  *
  * \return New head of sorted list
  */
 GList *
 pcmk__sort_nodes(GList *nodes, pe_node_t *active_node,
                  pe_working_set_t *data_set)
 {
     struct node_weight_s nw = { active_node, data_set };
 
     return g_list_sort_with_data(nodes, compare_nodes, &nw);
 }
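
/* Illustrative sketch (not part of this patch): combining pcmk__copy_node_list()
 * and pcmk__sort_nodes() to pick the most preferred candidate without touching
 * the caller's list. The helper name is hypothetical.
 */
static pe_node_t *
most_preferred_node(GList *nodes, pe_node_t *active_node,
                    pe_working_set_t *data_set)
{
    pe_node_t *best = NULL;
    GList *sorted = pcmk__copy_node_list(nodes, false);

    sorted = pcmk__sort_nodes(sorted, active_node, data_set);
    if (sorted != NULL) {
        best = pe__copy_node((pe_node_t *) sorted->data);
    }
    g_list_free_full(sorted, free);
    return best;
}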
 
 /*!
  * \internal
  * \brief Check whether any node is available to run resources
  *
  * \param[in] nodes  Nodes to check
  *
  * \return true if any node in \p nodes is available to run resources,
  *         otherwise false
  */
 bool
 pcmk__any_node_available(GHashTable *nodes)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (nodes == NULL) {
         return false;
     }
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if ((node->weight >= 0) && pcmk__node_available(node)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Apply node health values for all nodes in cluster
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__apply_node_health(pe_working_set_t *data_set)
 {
     int base_health = 0;
     enum pcmk__health_strategy strategy;
     const char *strategy_str = pe_pref(data_set->config_hash,
                                        PCMK__OPT_NODE_HEALTH_STRATEGY);
 
     strategy = pcmk__parse_health_strategy(strategy_str);
     if (strategy == pcmk__health_strategy_none) {
         return;
     }
     crm_info("Applying node health strategy '%s'", strategy_str);
 
     // The progressive strategy can use a base health score
     if (strategy == pcmk__health_strategy_progressive) {
         base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, data_set);
     }
 
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         int health = pe__sum_node_health_scores(node, base_health);
 
         // An overall health score of 0 has no effect
         if (health == 0) {
             continue;
         }
         crm_info("Node %s overall system health is %d",
                  node->details->uname, health);
 
         // Use node health as a location score for each resource on the node
         for (GList *r = data_set->resources; r != NULL; r = r->next) {
             pe_resource_t *rsc = (pe_resource_t *) r->data;
 
             bool constrain = true;
 
             if (health < 0) {
                 /* Negative health scores do not apply to resources with
                  * allow-unhealthy-nodes=true.
                  */
                 constrain = !crm_is_true(g_hash_table_lookup(rsc->meta,
                                          PCMK__META_ALLOW_UNHEALTHY_NODES));
             }
             if (constrain) {
                 pcmk__new_location(strategy_str, rsc, health, NULL, node,
                                    data_set);
             } else {
                 pe_rsc_trace(rsc, "%s is immune from health ban on %s",
                              rsc->id, node->details->uname);
             }
         }
     }
 }
+
+/*!
+ * \internal
+ * \brief Check for a node in a resource's parent's allowed nodes
+ *
+ * \param[in] rsc   Resource whose parent should be checked
+ * \param[in] node  Node to check for
+ *
+ * \return Equivalent of \p node from \p rsc's parent's allowed nodes if any,
+ *         otherwise NULL
+ */
+pe_node_t *
+pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
+{
+    GHashTable *allowed_nodes = NULL;
+
+    if ((rsc == NULL) || (node == NULL)) {
+        return NULL;
+    } else if (rsc->parent == NULL) {
+        allowed_nodes = rsc->allowed_nodes;
+    } else {
+        allowed_nodes = rsc->parent->allowed_nodes;
+    }
+    return pe_hash_table_lookup(allowed_nodes, node->details->id);
+}
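+
+/* Illustrative sketch (not part of this patch): an instance can only run where
+ * its parent is allowed, so pcmk__top_allowed_node() works as a cheap
+ * preliminary check. The helper name is hypothetical.
+ */
+static bool
+instance_allowed_on(const pe_resource_t *instance, const pe_node_t *node)
+{
+    pe_node_t *allowed = pcmk__top_allowed_node(instance, node);
+
+    return (allowed != NULL) && (allowed->weight >= 0);
+}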
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index 5f713f55d7..f97555c817 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -1,1051 +1,1049 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_CLONE 1
 #include <lib/pengine/variant.h>
 
-extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
-
 extern bool pcmk__is_daemon;
 
 static void
 child_promoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                             pe_resource_t * rsc, pe_resource_t * child, pe_resource_t * last,
                             pe_working_set_t * data_set)
 {
     if (child == NULL) {
         if (clone_data->ordered && last != NULL) {
             pe_rsc_trace(rsc, "Ordered version (last node)");
             /* last child promote before promoted started */
             pcmk__order_resource_actions(last, RSC_PROMOTE, rsc, RSC_PROMOTED,
                                          type, data_set);
         }
         return;
     }
 
     /* child promote before global promoted */
     pcmk__order_resource_actions(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type,
                                  data_set);
 
     /* global promote before child promote */
     pcmk__order_resource_actions(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type,
                                  data_set);
 
     if (clone_data->ordered) {
         pe_rsc_trace(rsc, "Ordered version");
         if (last == NULL) {
             /* global promote before first child promote */
             last = rsc;
 
         }
         /* else: child/child relative promote */
         pcmk__order_starts(last, child, type, data_set);
         pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE,
                                      type, data_set);
 
     } else {
         pe_rsc_trace(rsc, "Un-ordered version");
     }
 }
 
 static void
 child_demoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                            pe_resource_t * rsc, pe_resource_t * child, pe_resource_t * last,
                            pe_working_set_t * data_set)
 {
     if (child == NULL) {
         if (clone_data->ordered && last != NULL) {
             pe_rsc_trace(rsc, "Ordered version (last node)");
             /* global demote before first child demote */
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, last, RSC_DEMOTE,
                                          pe_order_optional, data_set);
         }
         return;
     }
 
     /* child demote before global demoted */
     pcmk__order_resource_actions(child, RSC_DEMOTE, rsc, RSC_DEMOTED,
                                  pe_order_implies_then_printed, data_set);
 
     /* global demote before child demote */
     pcmk__order_resource_actions(rsc, RSC_DEMOTE, child, RSC_DEMOTE,
                                  pe_order_implies_first_printed, data_set);
 
     if (clone_data->ordered && last != NULL) {
         pe_rsc_trace(rsc, "Ordered version");
 
         /* child/child relative demote */
         pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE, type,
                                      data_set);
 
     } else if (clone_data->ordered) {
         pe_rsc_trace(rsc, "Ordered version (1st node)");
         /* first child stop before global stopped */
         pcmk__order_resource_actions(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type,
                                      data_set);
 
     } else {
         pe_rsc_trace(rsc, "Un-ordered version");
     }
 }
 
 static void
 check_promotable_actions(pe_resource_t *rsc, gboolean *demoting,
                          gboolean *promoting)
 {
     GList *gIter = NULL;
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             check_promotable_actions(child, demoting, promoting);
         }
         return;
     }
 
     CRM_ASSERT(demoting != NULL);
     CRM_ASSERT(promoting != NULL);
 
     gIter = rsc->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (*promoting && *demoting) {
             return;
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue;
 
         } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_casei)) {
             *demoting = TRUE;
 
         } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_casei)) {
             *promoting = TRUE;
         }
     }
 }
 
 static void
 apply_promoted_location(pe_resource_t *child, GList *location_constraints,
                         pe_node_t *chosen)
 {
     CRM_CHECK(child && chosen, return);
     for (GList *gIter = location_constraints; gIter; gIter = gIter->next) {
         pe_node_t *cons_node = NULL;
         pe__location_t *cons = gIter->data;
 
         if (cons->role_filter == RSC_ROLE_PROMOTED) {
             pe_rsc_trace(child, "Applying %s to %s", cons->id, child->id);
             cons_node = pe_find_node_id(cons->node_list_rh, chosen->details->id);
         }
         if (cons_node != NULL) {
             int new_priority = pcmk__add_scores(child->priority,
                                                 cons_node->weight);
 
             pe_rsc_trace(child, "\t%s[%s]: %d -> %d (%d)",
                          child->id, cons_node->details->uname, child->priority,
                          new_priority, cons_node->weight);
             child->priority = new_priority;
         }
     }
 }
 
 static pe_node_t *
 guest_location(pe_node_t *guest_node)
 {
     pe_resource_t *guest = guest_node->details->remote_rsc->container;
 
     return guest->fns->location(guest, NULL, FALSE);
 }
 
 static pe_node_t *
 node_to_be_promoted_on(pe_resource_t *rsc)
 {
     pe_node_t *node = NULL;
     pe_node_t *local_node = NULL;
     pe_resource_t *parent = uber_parent(rsc);
     clone_variant_data_t *clone_data = NULL;
 
 #if 0
     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
 
     role = rsc->fns->state(rsc, FALSE);
     crm_info("%s role: %s", rsc->id, role2text(role));
 #endif
 
     if (rsc->children) {
         GList *gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             if (node_to_be_promoted_on(child) == NULL) {
                 pe_rsc_trace(rsc, "Child %s of %s can't be promoted", child->id, rsc->id);
                 return NULL;
             }
         }
     }
 
     node = rsc->fns->location(rsc, NULL, FALSE);
     if (node == NULL) {
         pe_rsc_trace(rsc, "%s cannot be promoted: not allocated", rsc->id);
         return NULL;
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) {
             crm_notice("Forcing unmanaged instance %s to remain promoted on %s",
                        rsc->id, node->details->uname);
 
         } else {
             return NULL;
         }
 
     } else if (rsc->priority < 0) {
         pe_rsc_trace(rsc, "%s cannot be promoted: preference: %d",
                      rsc->id, rsc->priority);
         return NULL;
 
     } else if (!pcmk__node_available(node)) {
         crm_trace("Node can't run any resources: %s", node->details->uname);
         return NULL;
 
     /* @TODO It's possible this check should be done in pcmk__node_available()
      * instead. We should investigate all its callers to figure out whether that
      * would be a good idea.
      */
     } else if (pe__is_guest_node(node) && (guest_location(node) == NULL)) {
         pe_rsc_trace(rsc, "%s cannot be promoted: guest %s not allocated",
                      rsc->id, node->details->remote_rsc->container->id);
         return NULL;
     }
 
     get_clone_variant_data(clone_data, parent);
     local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
 
     if (local_node == NULL) {
         crm_err("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
         return NULL;
 
     } else if ((local_node->count < clone_data->promoted_node_max)
                || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return local_node;
 
     } else {
         pe_rsc_trace(rsc, "%s cannot be promoted on %s: node full",
                      rsc->id, node->details->uname);
     }
 
     return NULL;
 }
 
 static gint
 sort_promotable_instance(gconstpointer a, gconstpointer b, gpointer data_set)
 {
     int rc;
     enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
     enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
 
     const pe_resource_t *resource1 = (const pe_resource_t *)a;
     const pe_resource_t *resource2 = (const pe_resource_t *)b;
 
     CRM_ASSERT(resource1 != NULL);
     CRM_ASSERT(resource2 != NULL);
 
     role1 = resource1->fns->state(resource1, TRUE);
     role2 = resource2->fns->state(resource2, TRUE);
 
     rc = sort_rsc_index(a, b);
     if (rc != 0) {
         crm_trace("%s %c %s (index)", resource1->id, rc < 0 ? '<' : '>', resource2->id);
         return rc;
     }
 
     if (role1 > role2) {
         crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id);
         return -1;
 
     } else if (role1 < role2) {
         crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id);
         return 1;
     }
 
-    return sort_clone_instance(a, b, data_set);
+    return pcmk__cmp_instance(a, b);
 }
 
 static void
 promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     pe_node_t *node = NULL;
     pe_node_t *chosen = NULL;
     clone_variant_data_t *clone_data = NULL;
     char score[33];
     size_t len = sizeof(score);
 
     get_clone_variant_data(clone_data, rsc);
 
     if (clone_data->added_promoted_constraints) {
         return;
     }
     clone_data->added_promoted_constraints = true;
     pe_rsc_trace(rsc, "Merging weights for %s", rsc->id);
     pe__set_resource_flags(rsc, pe_rsc_merging);
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Sort index: %s = %d", child->id, child->sort_index);
     }
     pe__show_node_weights(true, rsc, "Before", rsc->allowed_nodes, data_set);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         chosen = child->fns->location(child, NULL, FALSE);
         if (chosen == NULL || child->sort_index < 0) {
             pe_rsc_trace(rsc, "Skipping %s", child->id);
             continue;
         }
 
         node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
         CRM_ASSERT(node != NULL);
         // Add promotion preferences and rsc_location scores when role=Promoted
         score2char_stack(child->sort_index, score, len);
         pe_rsc_trace(rsc, "Adding %s to %s from %s", score,
                      node->details->uname, child->id);
         node->weight = pcmk__add_scores(child->sort_index, node->weight);
     }
 
     pe__show_node_weights(true, rsc, "Middle", rsc->allowed_nodes, data_set);
 
     gIter = rsc->rsc_cons;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         /* (Re-)add location preferences of resources that a promoted instance
          * should/must be colocated with.
          */
         if (constraint->dependent_role == RSC_ROLE_PROMOTED) {
             enum pe_weights flags = constraint->score == INFINITY ? 0 : pe_weights_rollback;
 
             pe_rsc_trace(rsc, "RHS: %s with %s: %d",
                          constraint->dependent->id, constraint->primary->id,
                          constraint->score);
             rsc->allowed_nodes = constraint->primary->cmds->merge_weights(
                 constraint->primary, rsc->id, rsc->allowed_nodes,
                 constraint->node_attribute,
                 constraint->score / (float) INFINITY, flags);
         }
     }
 
     gIter = rsc->rsc_cons_lhs;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(constraint, NULL)) {
             continue;
         }
 
         /* (Re-)add location preferences of resources that wish to be colocated
          * with a promoted instance.
          */
         if (constraint->primary_role == RSC_ROLE_PROMOTED) {
             pe_rsc_trace(rsc, "LHS: %s with %s: %d",
                          constraint->dependent->id, constraint->primary->id,
                          constraint->score);
             rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
                 constraint->dependent, rsc->id, rsc->allowed_nodes,
                 constraint->node_attribute,
                 constraint->score / (float) INFINITY,
                 pe_weights_rollback|pe_weights_positive);
         }
     }
 
     // Ban resource from all nodes if it needs a ticket but doesn't have it
     pcmk__require_promotion_tickets(rsc);
 
     pe__show_node_weights(true, rsc, "After", rsc->allowed_nodes, data_set);
 
     /* write them back and sort */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         chosen = child->fns->location(child, NULL, FALSE);
         if (!pcmk_is_set(child->flags, pe_rsc_managed)
             && (child->next_role == RSC_ROLE_PROMOTED)) {
             child->sort_index = INFINITY;
 
         } else if (chosen == NULL || child->sort_index < 0) {
             pe_rsc_trace(rsc, "%s: %d", child->id, child->sort_index);
 
         } else {
             node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
             CRM_ASSERT(node != NULL);
 
             child->sort_index = node->weight;
         }
         pe_rsc_trace(rsc, "Set sort index: %s = %d", child->id, child->sort_index);
     }
 
     rsc->children = g_list_sort_with_data(rsc->children,
                                           sort_promotable_instance, data_set);
     pe__clear_resource_flags(rsc, pe_rsc_merging);
 }
 
 static gboolean
 filter_anonymous_instance(pe_resource_t *rsc, const pe_node_t *node)
 {
     GList *rIter = NULL;
     char *key = clone_strip(rsc->id);
     pe_resource_t *parent = uber_parent(rsc);
 
     for (rIter = parent->children; rIter; rIter = rIter->next) {
         /* If there is an active instance on the node, only it receives the
          * promotion score. Use ->find_rsc() in case this is a cloned group.
          */
         pe_resource_t *child = rIter->data;
         pe_resource_t *active = parent->fns->find_rsc(child, key, node, pe_find_clone|pe_find_current);
 
         if(rsc == active) {
             pe_rsc_trace(rsc, "Found %s for %s active on %s: done", active->id, key, node->details->uname);
             free(key);
             return TRUE;
         } else if(active) {
             pe_rsc_trace(rsc, "Found %s for %s on %s: not %s", active->id, key, node->details->uname, rsc->id);
             free(key);
             return FALSE;
         } else {
             pe_rsc_trace(rsc, "%s on %s: not active", key, node->details->uname);
         }
     }
 
     for (rIter = parent->children; rIter; rIter = rIter->next) {
         pe_resource_t *child = rIter->data;
 
        /*
         * We know this instance is not running here, but any promotion score
         * still counts if the instance has been probed on this node.
         *
         * Again use ->find_rsc() because this could be a cloned group, and
         * knowing that other members of the group have been probed here
         * implies nothing about this one.
         */
         rsc = parent->fns->find_rsc(child, key, NULL, pe_find_clone);
         CRM_LOG_ASSERT(rsc);
         if(rsc) {
             pe_rsc_trace(rsc, "Checking %s for %s on %s", rsc->id, key, node->details->uname);
             if (g_hash_table_lookup(rsc->known_on, node->details->id)) {
                 free(key);
                 return TRUE;
             }
         }
     }
     free(key);
     return FALSE;
 }
 
 static const char *
 lookup_promotion_score(pe_resource_t *rsc, const pe_node_t *node, const char *name)
 {
     const char *attr_value = NULL;
 
     if (node && name) {
         char *attr_name = pcmk_promotion_score_name(name);
 
         attr_value = pe_node_attribute_calculated(node, attr_name, rsc);
         free(attr_name);
     }
     return attr_value;
 }
 
 static int
 promotion_score(pe_resource_t *rsc, const pe_node_t *node, int not_set_value)
 {
     char *name = rsc->id;
     const char *attr_value = NULL;
     int score = not_set_value;
     pe_node_t *match = NULL;
 
     CRM_CHECK(node != NULL, return not_set_value);
 
     if (rsc->children) {
         GList *gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
             int c_score = promotion_score(child, node, not_set_value);
 
             if (score == not_set_value) {
                 score = c_score;
             } else {
                 score += c_score;
             }
         }
         return score;
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
         && filter_anonymous_instance(rsc, node)) {
 
         pe_rsc_trace(rsc, "Anonymous clone %s is allowed on %s", rsc->id, node->details->uname);
 
     } else if (rsc->running_on || g_hash_table_size(rsc->known_on)) {
         /* If we've probed and/or started the resource anywhere, consider
          * promotion scores only from nodes where we know the status. However,
          * if the status of all nodes is unknown (e.g. cluster startup),
          * skip this code, to make sure we take into account any permanent
          * promotion scores set previously.
          */
         pe_node_t *known = pe_hash_table_lookup(rsc->known_on, node->details->id);
 
         match = pe_find_node_id(rsc->running_on, node->details->id);
         if ((match == NULL) && (known == NULL)) {
             pe_rsc_trace(rsc, "skipping %s (aka. %s) promotion score on %s because inactive",
                          rsc->id, rsc->clone_name, node->details->uname);
             return score;
         }
     }
 
     match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match == NULL) {
         return score;
 
     } else if (match->weight < 0) {
         pe_rsc_trace(rsc, "%s on %s has score: %d - ignoring",
                      rsc->id, match->details->uname, match->weight);
         return score;
     }
 
     if (rsc->clone_name) {
         /* Use the name the lrm knows this resource as,
          * since that's what crm_attribute --promotion would have used
          */
         name = rsc->clone_name;
     }
 
     attr_value = lookup_promotion_score(rsc, node, name);
     pe_rsc_trace(rsc, "Promotion score for %s on %s = %s",
                  name, node->details->uname, pcmk__s(attr_value, "(unset)"));
 
     if ((attr_value == NULL) && !pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         /* If we don't have any LRM history yet, we won't have clone_name -- in
          * that case, for anonymous clones, try the resource name without any
          * instance number.
          */
         name = clone_strip(rsc->id);
         if (strcmp(rsc->id, name)) {
             attr_value = lookup_promotion_score(rsc, node, name);
             pe_rsc_trace(rsc, "Stripped promotion score for %s on %s = %s",
                          name, node->details->uname,
                          pcmk__s(attr_value, "(unset)"));
         }
         free(name);
     }
 
     if (attr_value != NULL) {
         score = char2score(attr_value);
     }
 
     return score;
 }
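
/* Illustrative sketch (not part of this patch): promotion_score() can be
 * queried for every allowed node; it falls back from the clone_name to the
 * stripped resource name for anonymous clones. The helper is hypothetical.
 */
static void
trace_promotion_scores(pe_resource_t *rsc)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
        crm_trace("Promotion score for %s on %s is %d",
                  rsc->id, node->details->uname, promotion_score(rsc, node, 0));
    }
}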
 
 void
 pcmk__add_promotion_scores(pe_resource_t *rsc)
 {
     int score, new_score;
     GList *gIter = rsc->children;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (clone_data->added_promotion_scores) {
         /* Make sure we only do this once */
         return;
     }
 
     clone_data->added_promotion_scores = true;
 
     for (; gIter != NULL; gIter = gIter->next) {
         GHashTableIter iter;
         pe_node_t *node = NULL;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (!pcmk__node_available(node)) {
                 /* This node will never be promoted, so don't apply the
                  * promotion score, as that may lead to clone shuffling.
                  */
                 continue;
             }
 
             score = promotion_score(child_rsc, node, 0);
             if (score > 0) {
                 new_score = pcmk__add_scores(node->weight, score);
                 if (new_score != node->weight) {
                     pe_rsc_trace(rsc, "\t%s: Updating preference for %s (%d->%d)",
                                  child_rsc->id, node->details->uname, node->weight, new_score);
                     node->weight = new_score;
                 }
             }
 
             new_score = QB_MAX(child_rsc->priority, score);
             if (new_score != child_rsc->priority) {
                 pe_rsc_trace(rsc, "\t%s: Updating priority (%d->%d)",
                              child_rsc->id, child_rsc->priority, new_score);
                 child_rsc->priority = new_score;
             }
         }
     }
 }
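 /* pcmk__add_scores() above is assumed to add two scores with saturation at
  * +/-INFINITY, so that an INFINITY preference or ban is never diluted by
  * ordinary scores. A minimal sketch under that assumption (the bound and
  * all names are illustrative):
  */
 #if 0
 static int
 add_scores_sketch(int a, int b)
 {
     const int inf = 1000000;    /* assumed value of the INFINITY score */

     if ((a <= -inf) || (b <= -inf)) {
         return -inf;            /* -INFINITY is absorbing: a ban stays a ban */
     }
     if ((a >= inf) || (b >= inf)) {
         return inf;             /* otherwise +INFINITY saturates */
     }
     if (a + b >= inf) {
         return inf;             /* clamp ordinary sums into range */
     }
     if (a + b <= -inf) {
         return -inf;
     }
     return a + b;
 }
 #endif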
 
 static void
 set_role_unpromoted(pe_resource_t *rsc, bool current)
 {
     GList *gIter = rsc->children;
 
     if (current) {
         if (rsc->role == RSC_ROLE_STARTED) {
             rsc->role = RSC_ROLE_UNPROMOTED;
         }
 
     } else {
         GList *allocated = NULL;
 
         rsc->fns->location(rsc, &allocated, FALSE);
         pe__set_next_role(rsc, (allocated? RSC_ROLE_UNPROMOTED : RSC_ROLE_STOPPED),
                           "unpromoted instance");
         g_list_free(allocated);
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         set_role_unpromoted(child_rsc, current);
     }
 }
 
 static void
 set_role_promoted(pe_resource_t *rsc, gpointer user_data)
 {
     if (rsc->next_role == RSC_ROLE_UNKNOWN) {
         pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance");
     }
 
     g_list_foreach(rsc->children, (GFunc) set_role_promoted, NULL);
 }
 
 pe_node_t *
 pcmk__set_instance_roles(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     int promoted = 0;
     GList *gIter = NULL;
     GList *gIter2 = NULL;
     GHashTableIter iter;
     pe_node_t *node = NULL;
     pe_node_t *chosen = NULL;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
     char score[33];
     size_t len = sizeof(score);
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     // Repurpose count to track the number of promoted instances allocated
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         node->count = 0;
     }
 
     /*
      * Assign a promotion priority to each instance
      */
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         GList *list = NULL;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Assigning priority for %s: %s", child_rsc->id,
                      role2text(child_rsc->next_role));
 
         if (child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) {
             set_role_unpromoted(child_rsc, true);
         }
 
         chosen = child_rsc->fns->location(child_rsc, &list, FALSE);
         if (pcmk__list_of_multiple(list)) {
             pcmk__config_err("Cannot promote non-colocated child %s",
                              child_rsc->id);
         }
 
         g_list_free(list);
         if (chosen == NULL) {
             continue;
         }
 
         next_role = child_rsc->fns->state(child_rsc, FALSE);
         switch (next_role) {
             case RSC_ROLE_STARTED:
             case RSC_ROLE_UNKNOWN:
                 /*
                  * Default to -1 if no value is set
                  *
                  * This allows instances eligible for promotion to be specified
                  * based solely on rsc_location constraints,
                  * but prevents anyone from being promoted if
                  * neither a constraint nor a promotion score is present
                  */
                 child_rsc->priority = promotion_score(child_rsc, chosen, -1);
                 break;
 
             case RSC_ROLE_UNPROMOTED:
             case RSC_ROLE_STOPPED:
                 child_rsc->priority = -INFINITY;
                 break;
             case RSC_ROLE_PROMOTED:
                 /* We will arrive here if we're re-creating actions after a
                  * stonith (fencing) operation
                  */
                 break;
             default:
                 CRM_CHECK(FALSE /* unhandled */,
                           crm_err("Unknown resource role: %d for %s",
                                   next_role, child_rsc->id));
         }
 
         apply_promoted_location(child_rsc, child_rsc->rsc_location, chosen);
         apply_promoted_location(child_rsc, rsc->rsc_location, chosen);
 
         for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) {
             pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter2->data;
 
             child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->primary, cons,
                                                data_set);
         }
 
         child_rsc->sort_index = child_rsc->priority;
         pe_rsc_trace(rsc, "Assigning priority for %s: %d", child_rsc->id, child_rsc->priority);
 
         if (next_role == RSC_ROLE_PROMOTED) {
             child_rsc->sort_index = INFINITY;
         }
     }
 
     pe__show_node_weights(true, rsc, "Pre merge", rsc->allowed_nodes, data_set);
     promotion_order(rsc, data_set);
 
     // Choose the first N eligible instances to be promoted
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         score2char_stack(child_rsc->sort_index, score, len);
 
         chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
         if (pcmk_is_set(data_set->flags, pe_flag_show_scores) && !pcmk__is_daemon) {
             if (data_set->priv != NULL) {
                 pcmk__output_t *out = data_set->priv;
                 out->message(out, "promotion-score", child_rsc, chosen, score);
             }
 
         } else {
             pe_rsc_trace(rsc, "%s promotion score on %s: %s", child_rsc->id,
                          (chosen? chosen->details->uname : "none"), score);
         }
 
         chosen = NULL;          /* Reset chosen so that we don't promote more
                                  * than the required number of instances
                                  */
 
         if (child_rsc->sort_index < 0) {
             pe_rsc_trace(rsc, "Not supposed to promote child: %s", child_rsc->id);
 
         } else if ((promoted < clone_data->promoted_max)
                    || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             chosen = node_to_be_promoted_on(child_rsc);
         }
 
         pe_rsc_debug(rsc, "%s promotion score: %d", child_rsc->id, child_rsc->priority);
 
         if (chosen == NULL) {
             set_role_unpromoted(child_rsc, false);
             continue;
 
         } else if ((child_rsc->role < RSC_ROLE_PROMOTED)
               && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
               && data_set->no_quorum_policy == no_quorum_freeze) {
             crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                        child_rsc->id, role2text(child_rsc->role), role2text(child_rsc->next_role));
             set_role_unpromoted(child_rsc, false);
             continue;
         }
 
         chosen->count++;
         pe_rsc_info(rsc, "Promoting %s (%s %s)",
                     child_rsc->id, role2text(child_rsc->role), chosen->details->uname);
         set_role_promoted(child_rsc, NULL);
         promoted++;
     }
 
     pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d",
                 rsc->id, promoted, clone_data->promoted_max);
 
     return NULL;
 }
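 /* The loop above promotes the first promoted_max eligible instances after
  * promotion_order() has sorted them. The selection idea in isolation, as a
  * sketch with hypothetical types:
  */
 #if 0
 #include <stdlib.h>

 struct instance_sketch {
     const char *id;
     int sort_index;     /* promotion priority; negative means ineligible */
 };

 static int
 by_sort_index_desc(const void *a, const void *b)
 {
     const struct instance_sketch *ia = a;
     const struct instance_sketch *ib = b;

     return ib->sort_index - ia->sort_index;    /* highest first */
 }

 static int
 choose_promoted_sketch(struct instance_sketch *all, int n_instances,
                        int promoted_max)
 {
     int promoted = 0;

     qsort(all, n_instances, sizeof(all[0]), by_sort_index_desc);
     for (int i = 0; i < n_instances; i++) {
         if ((all[i].sort_index < 0) || (promoted >= promoted_max)) {
             break;              /* ineligible, or the quota is reached */
         }
         promoted++;             /* this instance gets the promoted role */
     }
     return promoted;
 }
 #endif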
 
 void
 create_promotable_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
     GList *gIter = rsc->children;
     pe_action_t *action_complete = NULL;
     gboolean any_promoting = FALSE;
     gboolean any_demoting = FALSE;
     pe_resource_t *last_promote_rsc = NULL;
     pe_resource_t *last_demote_rsc = NULL;
 
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_debug(rsc, "Creating actions for %s", rsc->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean child_promoting = FALSE;
         gboolean child_demoting = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id);
         child_rsc->cmds->create_actions(child_rsc, data_set);
         check_promotable_actions(child_rsc, &child_demoting, &child_promoting);
 
         any_demoting = any_demoting || child_demoting;
         any_promoting = any_promoting || child_promoting;
         pe_rsc_trace(rsc, "Created actions for %s: %d %d", child_rsc->id, child_promoting,
                      child_demoting);
     }
 
     /* promote */
     action = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTE, !any_promoting,
                                          true);
     action_complete = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTED,
                                                   !any_promoting, true);
     action_complete->priority = INFINITY;
 
     child_promoting_constraints(clone_data, pe_order_optional,
                                 rsc, NULL, last_promote_rsc, data_set);
 
     if (clone_data->promote_notify == NULL) {
         clone_data->promote_notify = pcmk__clone_notif_pseudo_ops(rsc,
                                                                   RSC_PROMOTE,
                                                                   action,
                                                                   action_complete);
     }
 
     /* demote */
     action = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTE, !any_demoting, true);
     action_complete = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTED,
                                                   !any_demoting, true);
     action_complete->priority = INFINITY;
 
     child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_demote_rsc, data_set);
 
     if (clone_data->demote_notify == NULL) {
         clone_data->demote_notify = pcmk__clone_notif_pseudo_ops(rsc,
                                                                  RSC_DEMOTE,
                                                                  action,
                                                                  action_complete);
 
         if (clone_data->promote_notify) {
             /* If we ever want groups to have notifications, this will need to
              * move to native_internal_constraints(), which would require
              * exposing the *_notify members.
              */
             order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre,
                           pe_order_optional);
         }
     }
 
     /* Restore the children's original priority */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->priority = rsc->priority;
     }
 }
 
 void
 promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     /* global stopped before start */
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional, data_set);
 
     /* global stopped before promote */
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_PROMOTE,
                                  pe_order_optional, data_set);
 
     /* global demoted before start */
     pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_START,
                                  pe_order_optional, data_set);
 
     /* global started before promote */
     pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
                                  pe_order_optional, data_set);
 
     /* global demoted before stop */
     pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
                                  pe_order_optional, data_set);
 
     /* global demote before demoted */
     pcmk__order_resource_actions(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
                                  pe_order_optional, data_set);
 
     /* global demoted before promote */
     pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE,
                                  pe_order_optional, data_set);
 }
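 /* Taken together, the optional orderings above imply this per-clone sequence
  * when role changes and restarts are both in play:
  *
  *     demote -> demoted -> stop -> stopped -> start -> started -> promote
  *
  * Demotions complete before anything stops or starts, and promotion waits
  * until the clone has fully stopped and restarted.
  */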
 
 
 void
 promotable_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     GList *gIter = rsc->children;
     pe_resource_t *last_rsc = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     promote_demote_constraints(rsc, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         /* child demote before promote */
         pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, child_rsc,
                                      RSC_PROMOTE, pe_order_optional, data_set);
 
         child_promoting_constraints(clone_data, pe_order_optional,
                                     rsc, child_rsc, last_rsc, data_set);
 
         child_demoting_constraints(clone_data, pe_order_optional,
                                    rsc, child_rsc, last_rsc, data_set);
 
         last_rsc = child_rsc;
     }
 }
 
 static void
 node_hash_update_one(GHashTable *hash, pe_node_t *other, const char *attr,
                      int score)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     const char *value = NULL;
 
     if (other == NULL) {
         return;
 
     } else if (attr == NULL) {
         attr = CRM_ATTR_UNAME;
     }
  
     value = pe_node_attribute_raw(other, attr);
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         const char *tmp = pe_node_attribute_raw(node, attr);
 
         if (pcmk__str_eq(value, tmp, pcmk__str_casei)) {
             crm_trace("%s: %d + %d", node->details->uname, node->weight, other->weight);
             node->weight = pcmk__add_scores(node->weight, score);
         }
     }
 }
 
 void
 promotable_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
                          pcmk__colocation_t *constraint,
                          pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
 
     if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
         GList *affected_nodes = NULL;
 
         for (gIter = primary->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
             pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
             enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE);
 
             pe_rsc_trace(primary, "Processing: %s", child_rsc->id);
             if ((chosen != NULL) && (next_role == constraint->primary_role)) {
                 pe_rsc_trace(primary, "Applying: %s %s %s %d", child_rsc->id,
                              role2text(next_role), chosen->details->uname, constraint->score);
                 if (constraint->score < INFINITY) {
                     node_hash_update_one(dependent->allowed_nodes, chosen,
                                          constraint->node_attribute, constraint->score);
                 }
                 affected_nodes = g_list_prepend(affected_nodes, chosen);
             }
         }
 
         /* Only do this if it's not a promoted-with-promoted colocation. Doing
          * this unconditionally would prevent unpromoted instances from being
          * started.
          */
         if ((constraint->dependent_role != RSC_ROLE_PROMOTED)
             || (constraint->primary_role != RSC_ROLE_PROMOTED)) {
 
             if (constraint->score >= INFINITY) {
                 node_list_exclude(dependent->allowed_nodes, affected_nodes,
                                   TRUE);
             }
         }
         g_list_free(affected_nodes);
 
     } else if (constraint->dependent_role == RSC_ROLE_PROMOTED) {
         pe_resource_t *primary_instance;
 
         primary_instance = find_compatible_child(dependent, primary,
                                                  constraint->primary_role,
                                                  FALSE, data_set);
         if ((primary_instance == NULL) && (constraint->score >= INFINITY)) {
             pe_rsc_trace(dependent, "%s can't be promoted because of %s",
                          dependent->id, constraint->id);
             dependent->priority = -INFINITY;
 
         } else if (primary_instance != NULL) {
             int new_priority = pcmk__add_scores(dependent->priority,
                                                 constraint->score);
 
             pe_rsc_debug(dependent, "Applying %s to %s",
                          constraint->id, dependent->id);
             pe_rsc_debug(dependent, "\t%s: %d->%d",
                          dependent->id, dependent->priority, new_priority);
             dependent->priority = new_priority;
         }
     }
 
     return;
 }
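 /* Summary of promotable_colocation_rh() above: while the dependent is still
  * provisional, matching primary instances adjust its allowed-node weights,
  * and a mandatory colocation additionally excludes every node without such
  * an instance. Once the dependent is placed, a promoted-role dependency
  * instead adjusts its promotion priority, vetoing promotion outright when a
  * mandatory constraint has no compatible primary instance.
  */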
diff --git a/lib/pacemaker/pcmk_sched_remote.c b/lib/pacemaker/pcmk_sched_remote.c
index 11aa01d63f..5dc138a236 100644
--- a/lib/pacemaker/pcmk_sched_remote.c
+++ b/lib/pacemaker/pcmk_sched_remote.c
@@ -1,737 +1,735 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 enum remote_connection_state {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static const char *
 state2text(enum remote_connection_state state)
 {
     switch (state) {
         case remote_state_unknown:
             return "unknown";
         case remote_state_alive:
             return "alive";
         case remote_state_resting:
             return "resting";
         case remote_state_failed:
             return "failed";
         case remote_state_stopped:
             return "stopped";
     }
 
     return "impossible";
 }
 
 /* We always use pe_order_preserve with these convenience functions to exempt
  * internally generated constraints from the prohibition of user constraints
  * involving remote connection resources.
  *
  * The start ordering additionally uses pe_order_runnable_left so that the
  * specified action is not runnable if the start is not runnable.
  */
 
 static inline void
 order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
                         enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if ((lh_rsc != NULL) && (rh_action != NULL) && (data_set != NULL)) {
         pcmk__new_ordering(lh_rsc, start_key(lh_rsc), NULL,
                            rh_action->rsc, NULL, rh_action,
                            pe_order_preserve|pe_order_runnable_left|extra,
                            data_set);
     }
 }
 
 static inline void
 order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
                        enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if ((lh_action != NULL) && (rh_rsc != NULL) && (data_set != NULL)) {
         pcmk__new_ordering(lh_action->rsc, NULL, lh_action,
                            rh_rsc, stop_key(rh_rsc), NULL,
                            pe_order_preserve|extra, data_set);
     }
 }
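 /* Usage sketch: a recurring monitor on a remote node can be forced to
  * restart whenever the connection restarts, as apply_remote_ordering()
  * does below:
  *
  *     order_start_then_action(remote_rsc, action,
  *                             pe_order_implies_then, data_set);
  *
  * Because both helpers set pe_order_preserve, these internally generated
  * orderings are exempt from the ban on user-defined constraints involving
  * connection resources.
  */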
 
 static enum remote_connection_state
 get_remote_node_state(pe_node_t *node)
 {
     pe_resource_t *remote_rsc = NULL;
     pe_node_t *cluster_node = NULL;
 
     CRM_ASSERT(node != NULL);
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node the remote connection resource resides on
      * is unclean or went offline, we can't process any operations
      * on that remote node until after it starts elsewhere.
      */
     if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
         || (remote_rsc->allocated_to == NULL)) {
 
         // The connection resource is not going to run anywhere
 
         if ((cluster_node != NULL) && cluster_node->details->unclean) {
             /* The remote connection is failed because its resource is on a
              * failed node and can't be recovered elsewhere, so we must fence.
              */
             return remote_state_failed;
         }
 
         if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
             /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
         /* Connection resource is failed */
 
         if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
             && remote_rsc->remote_reconnect_ms
             && node->details->remote_was_fenced
             && !pe__shutdown_requested(node)) {
 
             /* We won't know whether the connection is recoverable until the
              * reconnect interval expires and we reattempt connection.
              */
             return remote_state_unknown;
         }
 
         /* The remote connection is in a failed state. If there are any
          * resources known to be active on it (stop) or in an unknown state
          * (probe), we must assume the worst and fence it.
          */
         return remote_state_failed;
 
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere, so see
          * if we can recover it first
          */
         return remote_state_unknown;
 
     } else if (cluster_node->details->unclean
                || !(cluster_node->details->online)) {
         // Connection is running on a dead node; see if we can recover it first
         return remote_state_resting;
 
     } else if (pcmk__list_of_multiple(remote_rsc->running_on)
                && (remote_rsc->partial_migration_source != NULL)
                && (remote_rsc->partial_migration_target != NULL)) {
         /* We're in the middle of migrating a connection resource, so wait until
          * after the migration completes before performing any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
 
 static int
 is_recurring_action(pe_action_t *action)
 {
     guint interval_ms;
 
     if (pcmk__guint_from_hash(action->meta,
                               XML_LRM_ATTR_INTERVAL_MS, 0,
                               &interval_ms) != pcmk_rc_ok) {
         return 0;
     }
     return (interval_ms > 0);
 }
 
 /*!
  * \internal
  * \brief Order actions on remote node relative to actions for the connection
  *
  * \param[in] action    Action to order
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
 {
     pe_resource_t *remote_rsc = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     enum pe_ordering order_opts = pe_order_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id, state2text(state));
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                              CRMD_ACTION_MIGRATED, NULL)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             order_opts = pe_order_none;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 pe__set_order_flags(order_opts, pe_order_implies_then);
             }
 
             /* Ensure connection is up before running this action */
             order_start_then_action(remote_rsc, action, order_opts, data_set);
             break;
 
         case stop_rsc:
             if (state == remote_state_alive) {
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else if (state == remote_state_failed) {
                 /* The resource is active on the node, but since we don't have a
                  * valid connection, the only way to stop the resource is by
                  * fencing the node. There is no need to order the stop relative
                  * to the remote connection, since the stop will become implied
                  * by the fencing.
                  */
                 pe_fence_node(data_set, action->node,
                               "resources are active but connection is unrecoverable",
                               FALSE);
 
             } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                 /* State must be remote_state_unknown or remote_state_stopped.
                  * Since the connection is not coming back up in this
                  * transition, stop this resource first.
                  */
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else {
                 /* The connection is going to be started somewhere else, so
                  * stop this resource after that completes.
                  */
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
 
         case action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if ((state == remote_state_resting)
                 || (state == remote_state_unknown)) {
 
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 order_start_then_action(remote_rsc, action,
                                         pe_order_implies_then, data_set);
 
             } else {
                 pe_node_t *cluster_node = pe__current_node(remote_rsc);
 
                 if ((task == monitor_rsc) && (state == remote_state_failed)) {
                     /* We would only be here if we do not know the state of the
                      * resource on the remote node. Since we have no way to find
                      * out, it is necessary to fence the node.
                      */
                     pe_fence_node(data_set, action->node,
                                   "resources are in unknown state "
                                   "and connection is unrecoverable", FALSE);
                 }
 
                 if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                     /* The connection is currently up, but is going down
                      * permanently. Make sure we check services are actually
                      * stopped _before_ we let the connection get closed.
                      */
                     order_action_then_stop(action, remote_rsc,
                                            pe_order_runnable_left, data_set);
 
                 } else {
                     order_start_then_action(remote_rsc, action, pe_order_none,
                                             data_set);
                 }
             }
             break;
     }
 }
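 /* Summary of the dispatch above (task x connection state):
  *
  *   start/promote : ordered after connection start; made mandatory when the
  *                   state is "failed", to force recovery
  *   stop          : before connection stop when "alive" or permanently going
  *                   down; fencing when "failed"; otherwise after connection
  *                   start
  *   demote        : after connection start only when "resting" or "unknown"
  *   recurring ops : after connection start, restarted on reconnection
  *   other ops     : before connection stop when the connection is going down
  *                   for good, else after connection start; a monitor in the
  *                   "failed" state triggers fencing
  */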
 
 static void
 apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
 {
     /* VMs are also classified as containers for these purposes, in that
      * both involve a resource running on a real or remote cluster node.
      *
      * This allows us to be smarter about the type and extent of
      * recovery actions required in various scenarios.
      */
     pe_resource_t *remote_rsc = NULL;
     pe_resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     CRM_ASSERT(action->rsc != NULL);
     CRM_ASSERT(action->node != NULL);
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     container = remote_rsc->container;
     CRM_ASSERT(container != NULL);
 
     if (pcmk_is_set(container->flags, pe_rsc_failed)) {
         pe_fence_node(data_set, action->node, "container failed", FALSE);
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id,
               pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
               container->id);
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                              CRMD_ACTION_MIGRATED, NULL)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             // Force resource recovery if the container is recovered
             order_start_then_action(container, action, pe_order_implies_then,
                                     data_set);
 
             // Wait for the connection resource to be up, too
             order_start_then_action(remote_rsc, action, pe_order_none,
                                     data_set);
             break;
 
         case stop_rsc:
         case action_demote:
             if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                 /* When the container representing a guest node fails, any stop
                  * or demote actions for resources running on the guest node
                  * are implied by the container stopping. This is similar to
                  * how fencing operations work for cluster nodes and remote
                  * nodes.
                  */
             } else {
                 /* Ensure the operation happens before the connection is brought
                  * down.
                  *
                  * If we really wanted to, we could order these after the
                  * connection start, IFF the container's current role was
                  * stopped (otherwise we re-introduce an ordering loop when the
                  * connection is restarting).
                  */
                 order_action_then_stop(action, remote_rsc, pe_order_none,
                                        data_set);
             }
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 if (task != no_action) {
                     order_start_then_action(remote_rsc, action,
                                             pe_order_implies_then, data_set);
                 }
             } else {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
     }
 }
 
 /*!
  * \internal
  * \brief Order all relevant actions relative to remote connection actions
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         return;
     }
 
     crm_trace("Creating remote connection orderings");
 
     for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
         pe_resource_t *remote = NULL;
 
         // We are only interested in resource actions
         if (action->rsc == NULL) {
             continue;
         }
 
         /* Special case: If we are clearing the failcount of an actual
          * remote connection resource, then make sure this happens before
          * any start of the resource in this transition.
          */
         if (action->rsc->is_remote_node &&
             pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
 
             pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                                pcmk__op_key(action->rsc->id, RSC_START, 0),
                                NULL, pe_order_optional, data_set);
 
             continue;
         }
 
         // We are only interested in actions allocated to a node
         if (action->node == NULL) {
             continue;
         }
 
         if (!pe__is_guest_or_remote_node(action->node)) {
             continue;
         }
 
         /* We are only interested in real actions.
          *
          * @TODO This is probably wrong; pseudo-actions might be converted to
          * real actions and vice versa later in update_actions() at the end of
          * pcmk__apply_orderings().
          */
         if (pcmk_is_set(action->flags, pe_action_pseudo)) {
             continue;
         }
 
         remote = action->node->details->remote_rsc;
         if (remote == NULL) {
             // Orphaned
             continue;
         }
 
         /* Another special case: if a resource is moving to a Pacemaker Remote
          * node, order the stop on the original node after any start of the
          * remote connection. This ensures that if the connection fails to
          * start, we leave the resource running on the original node.
          */
         if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
             for (GList *item = action->rsc->actions; item != NULL;
                  item = item->next) {
                 pe_action_t *rsc_action = item->data;
 
                 if ((rsc_action->node->details != action->node->details)
                     && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                     pcmk__new_ordering(remote, start_key(remote), NULL,
                                        action->rsc, NULL, rsc_action,
                                        pe_order_optional, data_set);
                 }
             }
         }
 
         /* The action occurs across a remote connection, so create
          * ordering constraints that guarantee the action occurs while the node
          * is active (after start, before stop ... things like that).
          *
          * This is somewhat brittle in that we need to make sure the results of
          * this ordering are compatible with the result of get_router_node().
          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
          * of this logic rather than create_graph_action().
          */
         if (remote->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action, data_set);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action, data_set);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a failed remote node
  *
  * \param[in] node  Node to check
  *
  * \return true if \p node is a failed remote node, false otherwise
  */
 bool
 pcmk__is_failed_remote_node(pe_node_t *node)
 {
     return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
            && (get_remote_node_state(node) == remote_state_failed);
 }
 
 /*!
  * \internal
  * \brief Check whether a given resource corresponds to a given node as guest
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p node is a guest node and \p rsc is its containing
  *         resource, otherwise false
  */
 bool
 pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node)
 {
     return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
             && (node->details->remote_rsc != NULL)
             && (node->details->remote_rsc->container == rsc);
 }
 
 /*!
  * \internal
  * \brief Get proper connection host that a remote action must be routed through
  *
  * A remote connection resource might be starting, stopping, or migrating in the
  * same transition that an action needs to be executed on its Pacemaker Remote
  * node. Determine the proper node that the remote action should be routed
  * through.
  *
  * \param[in] action  (Potentially remote) action to route
  *
  * \return Connection host that action should be routed through if remote,
  *         otherwise NULL
  */
 pe_node_t *
 pcmk__connection_host_for_action(pe_action_t *action)
 {
     pe_node_t *began_on = NULL;
     pe_node_t *ended_on = NULL;
     bool partial_migration = false;
     const char *task = action->task;
 
     if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
         || !pe__is_guest_or_remote_node(action->node)) {
         return NULL;
     }
 
     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 
     began_on = pe__current_node(action->node->details->remote_rsc);
     ended_on = action->node->details->remote_rsc->allocated_to;
     if (action->node->details->remote_rsc
         && (action->node->details->remote_rsc->container == NULL)
         && action->node->details->remote_rsc->partial_migration_target) {
         partial_migration = true;
     }
 
     if (began_on == NULL) {
         crm_trace("Routing %s for %s through remote connection's "
                   "next node %s (starting)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (ended_on? ended_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return ended_on;
     }
 
     if (ended_on == NULL) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (stopping)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     if (began_on->details == ended_on->details) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (not moving)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     /* If we get here, the remote connection is moving during this transition.
      * This means some actions for resources behind the connection will get
      * routed through the cluster node the connection resource is currently on,
      * and others are routed through the cluster node the connection will end up
      * on.
      */
 
     if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
         task = g_hash_table_lookup(action->meta, "notify_operation");
     }
 
     /*
      * Stop, demote, and migration actions must occur before the connection can
      * move (these actions are required before the remote resource can stop). In
      * this case, we know these actions have to be routed through the initial
      * cluster node the connection resource lived on before the move takes
      * place.
      *
      * The exception is a partial migration of a (non-guest) remote connection
      * resource; in that case, all actions (even these) will be ordered after
      * the connection's pseudo-start on the migration target, so the target is
      * the router node.
      */
     if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                              "migrate_to", NULL) && !partial_migration) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (moving)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     /* Everything else (start, promote, monitor, probe, refresh,
      * clear failcount, delete, ...) must occur after the connection starts on
      * the node it is moving to.
      */
     crm_trace("Routing %s for %s through remote connection's "
               "next node %s (moving)%s",
               action->task, (action->rsc? action->rsc->id : "no resource"),
               (ended_on? ended_on->details->uname : "none"),
               partial_migration? " (partial migration)" : "");
     return ended_on;
 }
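 /* Summary of the routing rules above:
  *
  *   connection starting (no current node) -> route via next node
  *   connection stopping (no next node)    -> route via current node
  *   connection staying put                -> route via current node
  *   connection moving:
  *     stop/demote/cancel/migrate_*        -> current node, unless a partial
  *                                            migration makes the target the
  *                                            router node
  *     everything else                     -> next node
  */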
 
 /*!
  * \internal
  * \brief Replace remote connection's addr="#uname" with actual address
  *
  * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
  * with its "addr" parameter set to "#uname", pull the actual value from the
  * parameters evaluated without a node (which was put there earlier in
  * pcmk__create_graph() when the bundle's expand() method was called).
  *
  * \param[in] rsc       Resource to check
  * \param[in] params    Resource parameters evaluated per node
- * \param[in] data_set  Cluster working set
  */
 void
-pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
-                             pe_working_set_t *data_set)
+pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
 {
     const char *remote_addr = g_hash_table_lookup(params,
                                                   XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
-        GHashTable *base = pe_rsc_params(rsc, NULL, data_set);
+        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);
 
         remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
         if (remote_addr != NULL) {
             g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                 strdup(remote_addr));
         }
     }
 }
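 /* The substitution above in isolation, as a sketch using plain GLib tables.
  * The attribute name and both table arguments are stand-ins for illustration
  * only.
  */
 #if 0
 #include <glib.h>
 #include <string.h>

 static void
 substitute_addr_sketch(GHashTable *params, GHashTable *node_free_params)
 {
     const char *addr = g_hash_table_lookup(params, "addr");

     if ((addr != NULL) && (strcmp(addr, "#uname") == 0)) {
         /* Replace the placeholder with the value evaluated without a node */
         const char *real = g_hash_table_lookup(node_free_params, "addr");

         if (real != NULL) {
             g_hash_table_insert(params, g_strdup("addr"), g_strdup(real));
         }
     }
 }
 #endif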
 
 /*!
  * \brief Add special bundle meta-attributes to XML
  *
  * If a given action will be executed on a guest node (including a bundle),
  * add the special bundle meta-attribute "container-attribute-target" and
  * environment variable "physical_host" as XML attributes (using meta-attribute
  * naming).
  *
  * \param[in] args_xml   XML to add attributes to
  * \param[in] action     Action to check
  */
 void
 pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action)
 {
     pe_node_t *host = NULL;
     enum action_tasks task;
 
     if (!pe__is_guest_node(action->node)) {
         return;
     }
 
     task = text2task(action->task);
     if ((task == action_notify) || (task == action_notified)) {
         task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
     }
 
     switch (task) {
         case stop_rsc:
         case stopped_rsc:
         case action_demote:
         case action_demoted:
             // "Down" actions take place on guest's current host
             host = pe__current_node(action->node->details->remote_rsc->container);
             break;
 
         case start_rsc:
         case started_rsc:
         case monitor_rsc:
         case action_promote:
         case action_promoted:
             // "Up" actions take place on guest's next host
             host = action->node->details->remote_rsc->container->allocated_to;
             break;
 
         default:
             break;
     }
 
     if (host != NULL) {
         hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                        (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                       XML_RSC_ATTR_TARGET),
                        (gpointer) args_xml);
         hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                        (gpointer) host->details->uname,
                        (gpointer) args_xml);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index 50b1c47782..3cf19806ad 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,705 +1,1091 @@
 /*
  * Copyright 2014-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
+
+#include <stdlib.h>
+#include <string.h>
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Resource allocation methods that vary by resource variant
 static resource_alloc_functions_t allocation_methods[] = {
     {
         pcmk__native_merge_weights,
         pcmk__native_allocate,
         native_create_actions,
         native_create_probe,
         native_internal_constraints,
         native_rsc_colocation_lh,
         native_rsc_colocation_rh,
         pcmk__colocated_resources,
         native_rsc_location,
         native_action_flags,
         native_update_actions,
         pcmk__output_resource_actions,
         native_expand,
         native_append_meta,
         pcmk__primitive_add_utilization,
         pcmk__primitive_shutdown_lock,
     },
     {
         pcmk__group_merge_weights,
         pcmk__group_allocate,
         group_create_actions,
         native_create_probe,
         group_internal_constraints,
         group_rsc_colocation_lh,
         group_rsc_colocation_rh,
         pcmk__group_colocated_resources,
         group_rsc_location,
         group_action_flags,
         group_update_actions,
         pcmk__output_resource_actions,
         group_expand,
         group_append_meta,
         pcmk__group_add_utilization,
         pcmk__group_shutdown_lock,
     },
     {
         pcmk__native_merge_weights,
         pcmk__clone_allocate,
         clone_create_actions,
         clone_create_probe,
         clone_internal_constraints,
         clone_rsc_colocation_lh,
         clone_rsc_colocation_rh,
         pcmk__colocated_resources,
         clone_rsc_location,
         clone_action_flags,
         pcmk__multi_update_actions,
         pcmk__output_resource_actions,
         clone_expand,
         clone_append_meta,
         pcmk__clone_add_utilization,
         pcmk__clone_shutdown_lock,
     },
     {
         pcmk__native_merge_weights,
         pcmk__bundle_allocate,
         pcmk__bundle_create_actions,
         pcmk__bundle_create_probe,
         pcmk__bundle_internal_constraints,
         pcmk__bundle_rsc_colocation_lh,
         pcmk__bundle_rsc_colocation_rh,
         pcmk__colocated_resources,
         pcmk__bundle_rsc_location,
         pcmk__bundle_action_flags,
         pcmk__multi_update_actions,
         pcmk__output_bundle_actions,
         pcmk__bundle_expand,
         pcmk__bundle_append_meta,
         pcmk__bundle_add_utilization,
         pcmk__bundle_shutdown_lock,
     }
 };
 
 /*!
  * \internal
  * \brief Check whether a resource's agent standard, provider, or type changed
  *
  * \param[in] rsc             Resource to check
  * \param[in] node            Node needing unfencing/restart if agent changed
  * \param[in] rsc_entry       XML with previously known agent information
  * \param[in] active_on_node  Whether \p rsc is active on \p node
  *
  * \return true if agent for \p rsc changed, otherwise false
  */
 bool
 pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                         const xmlNode *rsc_entry, bool active_on_node)
 {
     bool changed = false;
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
     for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
         const char *value = crm_element_value(rsc->xml, attr_list[i]);
         const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
 
         if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
             changed = true;
             trigger_unfencing(rsc, node, "Device definition changed", NULL,
                               rsc->cluster);
             if (active_on_node) {
                 crm_notice("Forcing restart of %s on %s "
                            "because %s changed from '%s' to '%s'",
                            rsc->id, node->details->uname, attr_list[i],
                            pcmk__s(old_value, ""), pcmk__s(value, ""));
             }
         }
     }
     if (changed && active_on_node) {
         // Make sure the resource is restarted
         custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
                       rsc->cluster);
         pe__set_resource_flags(rsc, pe_rsc_start_pending);
     }
     return changed;
 }
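 /* The detection pattern above, reduced to a sketch in plain C: two
  * NULL-tolerant string records are compared over a fixed attribute list, and
  * any mismatch marks the agent as changed. All names are illustrative.
  */
 #if 0
 #include <stdbool.h>
 #include <string.h>

 static bool
 str_eq_nullsafe(const char *a, const char *b)
 {
     if ((a == NULL) || (b == NULL)) {
         return a == b;      /* equal only if both are unset */
     }
     return strcmp(a, b) == 0;
 }

 static bool
 agent_changed_sketch(const char *current[3], const char *previous[3])
 {
     /* Index 0: type, 1: class, 2: provider, mirroring attr_list above */
     for (int i = 0; i < 3; i++) {
         if (!str_eq_nullsafe(current[i], previous[i])) {
             return true;
         }
     }
     return false;
 }
 #endif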
 
 /*!
  * \internal
  * \brief Add resource (and any matching children) to list if it matches ID
  *
  * \param[in] result  List to add resource to
  * \param[in] rsc     Resource to check
  * \param[in] id      ID to match
  *
  * \return (Possibly new) head of list
  */
 static GList *
 add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
 {
     if ((strcmp(rsc->id, id) == 0)
         || ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
         result = g_list_prepend(result, rsc);
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         result = add_rsc_if_matching(result, child, id);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Find all resources matching a given ID by either ID or clone name
  *
  * \param[in] id        Resource ID to check
  * \param[in] data_set  Cluster working set
  *
  * \return List of all resources that match \p id
  * \note The caller is responsible for freeing the return value with
  *       g_list_free().
  */
 GList *
 pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set)
 {
     GList *result = NULL;
 
     CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Set the variant-appropriate allocation methods for a resource
  *
  * \param[in] rsc      Resource to set allocation methods for
  * \param[in] ignored  Only here so function can be used with g_list_foreach()
  */
 static void
 set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored)
 {
     rsc->cmds = &allocation_methods[rsc->variant];
     g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL);
 }
 
 /*!
  * \internal
  * \brief Set the variant-appropriate allocation methods for all resources
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__set_allocation_methods(pe_working_set_t *data_set)
 {
     g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc,
                    NULL);
 }
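 /* The dispatch idea behind allocation_methods[] in isolation: each resource
  * variant indexes a table of function pointers, so callers can invoke
  * rsc->cmds->... without caring about the variant. A sketch with
  * hypothetical names:
  */
 #if 0
 #include <stdio.h>

 enum variant_sketch { VARIANT_NATIVE = 0, VARIANT_CLONE = 1 };

 struct methods_sketch {
     void (*allocate)(const char *id);
 };

 static void allocate_native(const char *id) { printf("native: %s\n", id); }
 static void allocate_clone(const char *id)  { printf("clone: %s\n", id); }

 static const struct methods_sketch methods_table[] = {
     [VARIANT_NATIVE] = { allocate_native },
     [VARIANT_CLONE]  = { allocate_clone },
 };

 static void
 dispatch_sketch(enum variant_sketch variant, const char *id)
 {
     /* Variant-appropriate method, as set_allocation_methods_for_rsc() does */
     methods_table[variant].allocate(id);
 }
 #endif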
 
 // Shared implementation of resource_alloc_functions_t:colocated_resources()
 GList *
 pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                           GList *colocated_rscs)
 {
     GList *gIter = NULL;
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
     if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
         return colocated_rscs;
     }
 
     pe_rsc_trace(orig_rsc, "%s is in colocation chain with %s",
                  rsc->id, orig_rsc->id);
     colocated_rscs = g_list_append(colocated_rscs, rsc);
 
     // Follow colocations where this resource is the dependent resource
     for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
         pe_resource_t *primary = constraint->primary;
 
         if (primary == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if ((constraint->score == INFINITY) &&
             (pcmk__colocation_affects(rsc, primary, constraint,
                                       true) == pcmk__coloc_affects_location)) {
 
             colocated_rscs = primary->cmds->colocated_resources(primary,
                                                                 orig_rsc,
                                                                 colocated_rscs);
         }
     }
 
     // Follow colocations where this resource is the primary resource
     for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
         pe_resource_t *dependent = constraint->dependent;
 
         if (dependent == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if (pe_rsc_is_clone(rsc) && !pe_rsc_is_clone(dependent)) {
             continue; // We can't be sure whether dependent will be colocated
         }
 
         if ((constraint->score == INFINITY) &&
             (pcmk__colocation_affects(dependent, rsc, constraint,
                                       true) == pcmk__coloc_affects_location)) {
 
             colocated_rscs = dependent->cmds->colocated_resources(dependent,
                                                                   orig_rsc,
                                                                   colocated_rscs);
         }
     }
 
     return colocated_rscs;
 }
 
 void
 pcmk__output_resource_actions(pe_resource_t *rsc)
 {
     pcmk__output_t *out = rsc->cluster->priv;
 
     pe_node_t *next = NULL;
     pe_node_t *current = NULL;
 
     if (rsc->children != NULL) {
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *) iter->data;
 
             child->cmds->output_actions(child);
         }
         return;
     }
 
     next = rsc->allocated_to;
     if (rsc->running_on) {
         current = pe__current_node(rsc);
         if (rsc->role == RSC_ROLE_STOPPED) {
             /*
              * This can occur when resources are being recovered.
              * We fiddle with the current role in native_create_actions().
              */
             rsc->role = RSC_ROLE_STARTED;
         }
     }
 
     if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         /* Don't log stopped orphans */
         return;
     }
 
     out->message(out, "rsc-action", rsc, current, next);
 }
 
 /*!
  * \internal
  * \brief Assign a specified primitive resource to a node
  *
  * Assign a specified primitive resource to a specified node, if the node can
  * run the resource (or unconditionally, if \p force is true). Mark the resource
  * as no longer provisional. If the primitive can't be assigned (or \p chosen is
  * NULL), unassign any previous assignment for it, set its next role to stopped,
  * and update any existing actions scheduled for it. This is not done
  * recursively for children, so it should be called only for primitives.
  *
  * \param[in] rsc     Resource to assign
  * \param[in] chosen  Node to assign \p rsc to
  * \param[in] force   If true, assign to \p chosen even if unavailable
  *
  * \return true if \p rsc could be assigned, otherwise false
  *
  * \note Assigning a resource to the NULL node using this function is different
  *       from calling pcmk__unassign_resource(), in that it will also update any
  *       actions created for the resource.
  */
 bool
 pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force)
 {
     pcmk__output_t *out = rsc->cluster->priv;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (!force && (chosen != NULL)) {
         if ((chosen->weight < 0)
             // Allow the graph to assume that guest node connections will come up
             || (!pcmk__node_available(chosen) && !pe__is_guest_node(chosen))) {
 
             crm_debug("All nodes for resource %s are unavailable, unclean or "
                       "shutting down (%s can%s run resources, with weight %d)",
                       rsc->id, chosen->details->uname,
                       (pcmk__node_available(chosen)? "" : "not"),
                       chosen->weight);
             pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
             chosen = NULL;
         }
     }
 
     pcmk__unassign_resource(rsc);
     pe__clear_resource_flags(rsc, pe_rsc_provisional);
 
     if (chosen == NULL) {
         crm_debug("Could not allocate a node for %s", rsc->id);
         pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
 
         for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
             pe_action_t *op = (pe_action_t *) iter->data;
 
             crm_debug("Updating %s for allocation failure", op->uuid);
 
             if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) {
                 pe__clear_action_flags(op, pe_action_optional);
 
             } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) {
                 pe__clear_action_flags(op, pe_action_runnable);
                 //pe__set_resource_flags(rsc, pe_rsc_block);
 
             } else {
                 // Cancel recurring actions, unless for stopped state
                 const char *interval_ms_s = NULL;
                 const char *target_rc_s = NULL;
                 char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
 
                 interval_ms_s = g_hash_table_lookup(op->meta,
                                                     XML_LRM_ATTR_INTERVAL_MS);
                 target_rc_s = g_hash_table_lookup(op->meta,
                                                   XML_ATTR_TE_TARGET_RC);
                 if ((interval_ms_s != NULL)
                     && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none)
                     && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
                     pe__clear_action_flags(op, pe_action_runnable);
                 }
                 free(rc_stopped);
             }
         }
         return false;
     }
 
     crm_debug("Assigning %s to %s", rsc->id, chosen->details->uname);
     rsc->allocated_to = pe__copy_node(chosen);
 
     chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc,
                                                     rsc);
     chosen->details->num_resources++;
     chosen->count++;
     pcmk__consume_node_capacity(chosen->details->utilization, rsc);
 
     if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
         out->message(out, "resource-util", rsc, chosen, __func__);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Assign a specified resource (of any variant) to a node
  *
  * Assign a specified resource and its children (if any) to a specified node, if
  * the node can run the resource (or unconditionally, if \p force is true). Mark
  * the resources as no longer provisional. If the resources can't be assigned
  * (or \p node is NULL), unassign any previous assignments, set next role to
  * stopped, and update any existing actions scheduled for them.
  *
  * \param[in] rsc     Resource to assign
  * \param[in] node    Node to assign \p rsc to
  * \param[in] force   If true, assign to \p node even if unavailable
  *
  * \return true if any of the assigned resources changed from a previous
  *         assignment, otherwise false
  *
  * \note Assigning a resource to the NULL node using this function is different
  *       from calling pcmk__unassign_resource(), in that it will also update any
  *       actions created for the resource.
  */
 bool
 pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
 {
     bool changed = false;
 
     if (rsc->children == NULL) {
         if (rsc->allocated_to != NULL) {
             changed = true;
         }
         pcmk__assign_primitive(rsc, node, force);
 
     } else {
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
 
             changed |= pcmk__assign_resource(child_rsc, node, force);
         }
     }
     return changed;
 }
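 
 /* Illustrative usage (editor's sketch, not part of this patch): a caller that
  * has settled on a node assigns the whole resource tree in one call; note the
  * return value reports whether a previous assignment was disturbed, not
  * whether assignment succeeded:
  *
  *     if (pcmk__assign_resource(rsc, chosen_node, false)) {
  *         crm_trace("%s was reassigned or unassigned", rsc->id);
  *     }
  *
  * Here chosen_node is a hypothetical pe_node_t * picked by earlier scoring.
  */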
 
 /*!
  * \internal
  * \brief Remove any assignment of a specified resource to a node
  *
  * If a specified resource has been assigned to a node, remove that assignment
  * and mark the resource as provisional again. This is not done recursively for
  * children, so it should be called only for primitives.
  *
  * \param[in] rsc  Resource to unassign
  */
 void
 pcmk__unassign_resource(pe_resource_t *rsc)
 {
     pe_node_t *old = rsc->allocated_to;
 
     if (old == NULL) {
         return;
     }
 
     crm_info("Unassigning %s from %s", rsc->id, old->details->uname);
     pe__set_resource_flags(rsc, pe_rsc_provisional);
     rsc->allocated_to = NULL;
 
     /* We're going to free the pe_node_t, but its details member is shared and
      * will remain, so update that appropriately first.
      */
     old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
                                                 rsc);
     old->details->num_resources--;
     pcmk__release_node_capacity(old->details->utilization, rsc);
     free(old);
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has reached its migration threshold on a node
  *
  * \param[in]  rsc       Resource to check
  * \param[in]  node      Node to check
  * \param[out] failed    If the threshold has been reached, this will be set to
  *                       the resource that failed (possibly a parent of \p rsc)
  *
  * \return true if the migration threshold has been reached, false otherwise
  */
 bool
 pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
                         pe_resource_t **failed)
 {
     int fail_count, remaining_tries;
     pe_resource_t *rsc_to_ban = rsc;
 
     // Migration threshold of 0 means never force away
     if (rsc->migration_threshold == 0) {
         return false;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return false;
     }
 
     // If there are no failures, there's no need to force away
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL,
                                   rsc->cluster);
     if (fail_count <= 0) {
         return false;
     }
 
     // If failed resource is anonymous clone instance, we'll force clone away
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         rsc_to_ban = uber_parent(rsc);
     }
 
     // How many more times recovery will be tried on this node
     remaining_tries = rsc->migration_threshold - fail_count;
 
     if (remaining_tries <= 0) {
         crm_warn("%s cannot run on %s due to reaching migration threshold "
                  "(clean up resource to allow again)"
                  CRM_XS " failures=%d migration-threshold=%d",
                  rsc_to_ban->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
         if (failed != NULL) {
             *failed = rsc_to_ban;
         }
         return true;
     }
 
     crm_info("%s can fail %d more time%s on "
              "%s before reaching migration threshold (%d)",
              rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
              node->details->uname, rsc->migration_threshold);
     return false;
 }
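 
 /* Illustrative usage (editor's sketch, not part of this patch): a caller that
  * finds the threshold reached would typically ban the failed resource (which
  * may be rsc's parent for anonymous clone instances) from the checked node:
  *
  *     pe_resource_t *failed = NULL;
  *
  *     if (pcmk__threshold_reached(rsc, node, &failed)) {
  *         resource_location(failed, node, -INFINITY, "__fail_limit__",
  *                           rsc->cluster);
  *     }
  *
  * The "__fail_limit__" constraint tag is assumed here for the sketch.
  */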
 
 static void *
 convert_const_pointer(const void *ptr)
 {
     /* Worst function ever */
     return (void *)ptr;
 }
 
 /*!
  * \internal
  * \brief Get a node's weight
  *
  * \param[in] node     Unweighted node to check (for node ID)
  * \param[in] nodes    List of weighted nodes to look for \p node in
  *
  * \return Node's weight, or -INFINITY if not found
  */
 static int
 get_node_weight(pe_node_t *node, GHashTable *nodes)
 {
     pe_node_t *weighted_node = NULL;
 
     if ((node != NULL) && (nodes != NULL)) {
         weighted_node = g_hash_table_lookup(nodes, node->details->id);
     }
     return (weighted_node == NULL)? -INFINITY : weighted_node->weight;
 }
 
 /*!
  * \internal
  * \brief Compare two resources according to which should be allocated first
  *
  * \param[in] a     First resource to compare
  * \param[in] b     Second resource to compare
  * \param[in] data  Sorted list of all nodes in cluster
  *
  * \return -1 if \p a should be allocated before \p b, 0 if they are equal,
  *         or +1 if \p a should be allocated after \p b
  */
 static gint
 cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
 {
     const pe_resource_t *resource1 = a;
     const pe_resource_t *resource2 = b;
     GList *nodes = (GList *) data;
 
     int rc = 0;
     int r1_weight = -INFINITY;
     int r2_weight = -INFINITY;
     pe_node_t *r1_node = NULL;
     pe_node_t *r2_node = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
     const char *reason = NULL;
 
     // Resources with highest priority should be allocated first
     reason = "priority";
     r1_weight = resource1->priority;
     r2_weight = resource2->priority;
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     // We need nodes to make any other useful comparisons
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     // Calculate and log node weights
     r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
                                           resource1->id, NULL, NULL, 1,
                                           pe_weights_forward | pe_weights_init);
     r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
                                           resource2->id, NULL, NULL, 1,
                                           pe_weights_forward | pe_weights_init);
     pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
                           resource1->cluster);
     pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
                           resource2->cluster);
 
     // The resource with highest score on its current node goes first
     reason = "current location";
     if (resource1->running_on != NULL) {
         r1_node = pe__current_node(resource1);
     }
     if (resource2->running_on != NULL) {
         r2_node = pe__current_node(resource2);
     }
     r1_weight = get_node_weight(r1_node, r1_nodes);
     r2_weight = get_node_weight(r2_node, r2_nodes);
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     // Otherwise a higher weight on any node will do
     reason = "score";
     for (GList *iter = nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         r1_weight = get_node_weight(node, r1_nodes);
         r2_weight = get_node_weight(node, r2_nodes);
         if (r1_weight > r2_weight) {
             rc = -1;
             goto done;
         }
         if (r1_weight < r2_weight) {
             rc = 1;
             goto done;
         }
     }
 
 done:
     crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
               resource1->id, r1_weight,
               ((r1_node == NULL)? "" : " on "),
               ((r1_node == NULL)? "" : r1_node->details->id),
               ((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
               resource2->id, r2_weight,
               ((r2_node == NULL)? "" : " on "),
               ((r2_node == NULL)? "" : r2_node->details->id),
               reason);
     if (r1_nodes != NULL) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes != NULL) {
         g_hash_table_destroy(r2_nodes);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Sort resources in the order they should be allocated to nodes
  *
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__sort_resources(pe_working_set_t *data_set)
 {
     GList *nodes = g_list_copy(data_set->nodes);
 
     nodes = pcmk__sort_nodes(nodes, NULL, data_set);
     data_set->resources = g_list_sort_with_data(data_set->resources,
                                                 cmp_resources, nodes);
     g_list_free(nodes);
 }
+
+/*!
+ * \internal
+ * \brief Create a hash table with a single node in it
+ *
+ * \param[in] node  Node to copy into new table
+ *
+ * \return Newly created hash table containing a copy of \p node
+ * \note The caller is responsible for freeing the result with
+ *       g_hash_table_destroy().
+ */
+static GHashTable *
+new_node_table(pe_node_t *node)
+{
+    GHashTable *table = pcmk__strkey_table(NULL, free);
+
+    node = pe__copy_node(node);
+    g_hash_table_insert(table, (gpointer) node->details->id, node);
+    return table;
+}
+
+/*!
+ * \internal
+ * \brief Apply a resource's parent's colocation scores to a node table
+ *
+ * \param[in]     rsc    Resource whose colocations should be applied
+ * \param[in,out] nodes  Node table to apply colocations to
+ */
+static void
+apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
+{
+    GList *iter = NULL;
+    pcmk__colocation_t *colocation = NULL;
+
+    for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
+        colocation = (pcmk__colocation_t *) iter->data;
+        *nodes = pcmk__native_merge_weights(colocation->primary, rsc->id,
+                                            *nodes, colocation->node_attribute,
+                                            colocation->score / (float) INFINITY,
+                                            0);
+    }
+    for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
+        colocation = (pcmk__colocation_t *) iter->data;
+        if (!pcmk__colocation_has_influence(colocation, rsc)) {
+            continue;
+        }
+        *nodes = pcmk__native_merge_weights(colocation->dependent, rsc->id,
+                                            *nodes, colocation->node_attribute,
+                                            colocation->score / (float) INFINITY,
+                                            pe_weights_positive);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Compare clone or bundle instances based on colocation scores
+ *
+ * Determine the relative order in which two clone or bundle instances should be
+ * assigned to nodes, considering the scores of colocation constraints directly
+ * or indirectly involving them.
+ *
+ * \param[in] instance1  First instance to compare
+ * \param[in] instance2  Second instance to compare
+ *
+ * \return A negative number if \p instance1 should be assigned first,
+ *         a positive number if \p instance2 should be assigned first,
+ *         or 0 if assignment order doesn't matter
+ */
+static int
+cmp_instance_by_colocation(const pe_resource_t *instance1,
+                           const pe_resource_t *instance2)
+{
+    int rc = 0;
+    pe_node_t *node1 = NULL;
+    pe_node_t *node2 = NULL;
+    pe_node_t *current_node1 = pe__current_node(instance1);
+    pe_node_t *current_node2 = pe__current_node(instance2);
+    GHashTable *colocated_scores1 = NULL;
+    GHashTable *colocated_scores2 = NULL;
+
+    CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
+               && (instance2 != NULL) && (instance2->parent != NULL)
+               && (current_node1 != NULL) && (current_node2 != NULL));
+
+    // Create node tables initialized with each node
+    colocated_scores1 = new_node_table(current_node1);
+    colocated_scores2 = new_node_table(current_node2);
+
+    // Apply parental colocations
+    apply_parent_colocations(instance1, &colocated_scores1);
+    apply_parent_colocations(instance2, &colocated_scores2);
+
+    // Find original nodes again, with scores updated for colocations
+    node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
+    node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
+
+    // Compare nodes by updated scores
+    if (node1->weight < node2->weight) {
+        crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
+                  instance1->id, node1->weight, node1->details->uname,
+                  instance2->id, node2->weight, node2->details->uname);
+        rc = 1;
+
+    } else if (node1->weight > node2->weight) {
+        crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
+                  instance1->id, node1->weight, node1->details->uname,
+                  instance2->id, node2->weight, node2->details->uname);
+        rc = -1;
+    }
+
+    g_hash_table_destroy(colocated_scores1);
+    g_hash_table_destroy(colocated_scores2);
+    return rc;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a resource or any of its children are failed
+ *
+ * \param[in] rsc  Resource to check
+ *
+ * \return true if \p rsc or any of its children are failed, otherwise false
+ */
+static bool
+did_fail(const pe_resource_t *rsc)
+{
+    if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+        return true;
+    }
+    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        if (did_fail((pe_resource_t *) iter->data)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a node is allowed to run a resource
+ *
+ * \param[in]     rsc   Resource to check
+ * \param[in,out] node  Node to check (will be set NULL if not allowed)
+ *
+ * \return true if *node is either NULL or allowed for \p rsc, otherwise false
+ */
+static bool
+node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
+{
+    if (*node != NULL) {
+        pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
+                                                  (*node)->details->id);
+        if ((allowed == NULL) || (allowed->weight < 0)) {
+            pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
+                         rsc->id, (*node)->details->uname);
+            *node = NULL;
+            return false;
+        }
+    }
+    return true;
+}
+
+/*!
+ * \internal
+ * \brief Compare two clone or bundle instances' instance numbers
+ *
+ * \param[in] a  First instance to compare
+ * \param[in] b  Second instance to compare
+ *
+ * \return A negative number if \p a's instance number is lower,
+ *         a positive number if \p b's instance number is lower,
+ *         or 0 if their instance numbers are the same
+ */
+gint
+pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
+{
+    const pe_resource_t *instance1 = (const pe_resource_t *) a;
+    const pe_resource_t *instance2 = (const pe_resource_t *) b;
+    char *div1 = NULL;
+    char *div2 = NULL;
+
+    CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
+
+    // Clone numbers are after a colon, bundle numbers after a dash
+    div1 = strrchr(instance1->id, ':');
+    if (div1 == NULL) {
+        div1 = strrchr(instance1->id, '-');
+    }
+    div2 = strrchr(instance2->id, ':');
+    if (div2 == NULL) {
+        div2 = strrchr(instance2->id, '-');
+    }
+    CRM_ASSERT((div1 != NULL) && (div2 != NULL));
+
+    return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
+}
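+
+/* Illustrative usage (editor's sketch, not part of this patch): the signature
+ * matches GCompareFunc, so a list of instances can be put in numeric order
+ * with a plain g_list_sort() call (clone is a hypothetical parent resource):
+ *
+ *     GList *instances = g_list_copy(clone->children);
+ *
+ *     instances = g_list_sort(instances, pcmk__cmp_instance_number);
+ *     g_list_free(instances); // entries are owned by the working set
+ */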
+
+/*!
+ * \internal
+ * \brief Compare clone or bundle instances according to assignment order
+ *
+ * Compare two clone or bundle instances according to the order they should be
+ * assigned to nodes, preferring (in order):
+ *
+ *  - Active instance that is less multiply active
+ *  - Instance that is not active on a disallowed node
+ *  - Instance with higher configured priority
+ *  - Active instance
+ *  - Active instance whose current node can run resources
+ *  - Active instance whose parent is allowed on current node
+ *  - Active instance whose current node has fewer other instances
+ *  - Failed instance
+ *  - Instance whose colocations result in higher score on current node
+ *  - Instance with lower instance number
+ *
+ * \param[in] a          First instance to compare
+ * \param[in] b          Second instance to compare
+ *
+ * \return A negative number if \p a should be assigned first,
+ *         a positive number if \p b should be assigned first,
+ *         or 0 if assignment order doesn't matter
+ */
+gint
+pcmk__cmp_instance(gconstpointer a, gconstpointer b)
+{
+    int rc = 0;
+    pe_node_t *node1 = NULL;
+    pe_node_t *node2 = NULL;
+    unsigned int nnodes1 = 0;
+    unsigned int nnodes2 = 0;
+
+    bool can1 = true;
+    bool can2 = true;
+
+    const pe_resource_t *instance1 = (const pe_resource_t *) a;
+    const pe_resource_t *instance2 = (const pe_resource_t *) b;
+
+    CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
+
+    node1 = pe__find_active_on(instance1, &nnodes1, NULL);
+    node2 = pe__find_active_on(instance2, &nnodes2, NULL);
+
+    /* If both instances are running and at least one is multiply
+     * active, prefer instance that's running on fewer nodes.
+     */
+    if ((nnodes1 > 0) && (nnodes2 > 0)) {
+        if (nnodes1 < nnodes2) {
+            crm_trace("Assign %s (active on %d) before %s (active on %d): "
+                      "less multiply active",
+                      instance1->id, nnodes1, instance2->id, nnodes2);
+            return -1;
+
+        } else if (nnodes1 > nnodes2) {
+            crm_trace("Assign %s (active on %d) after %s (active on %d): "
+                      "more multiply active",
+                      instance1->id, nnodes1, instance2->id, nnodes2);
+            return 1;
+        }
+    }
+
+    /* An instance that is either inactive or active on an allowed node is
+     * preferred over an instance that is active on a no-longer-allowed node.
+     */
+    can1 = node_is_allowed(instance1, &node1);
+    can2 = node_is_allowed(instance2, &node2);
+    if (can1 && !can2) {
+        crm_trace("Assign %s before %s: not active on a disallowed node",
+                  instance1->id, instance2->id);
+        return -1;
+
+    } else if (!can1 && can2) {
+        crm_trace("Assign %s after %s: active on a disallowed node",
+                  instance1->id, instance2->id);
+        return 1;
+    }
+
+    // Prefer instance with higher configured priority
+    if (instance1->priority > instance2->priority) {
+        crm_trace("Assign %s before %s: priority (%d > %d)",
+                  instance1->id, instance2->id,
+                  instance1->priority, instance2->priority);
+        return -1;
+
+    } else if (instance1->priority < instance2->priority) {
+        crm_trace("Assign %s after %s: priority (%d < %d)",
+                  instance1->id, instance2->id,
+                  instance1->priority, instance2->priority);
+        return 1;
+    }
+
+    // Prefer active instance
+    if ((node1 == NULL) && (node2 == NULL)) {
+        crm_trace("No assignment preference for %s vs. %s: inactive",
+                  instance1->id, instance2->id);
+        return 0;
+
+    } else if (node1 == NULL) {
+        crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
+        return 1;
+
+    } else if (node2 == NULL) {
+        crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
+        return -1;
+    }
+
+    // Prefer instance whose current node can run resources
+    can1 = pcmk__node_available(node1);
+    can2 = pcmk__node_available(node2);
+    if (can1 && !can2) {
+        crm_trace("Assign %s before %s: current node can run resources",
+                  instance1->id, instance2->id);
+        return -1;
+
+    } else if (!can1 && can2) {
+        crm_trace("Assign %s after %s: current node can't run resources",
+                  instance1->id, instance2->id);
+        return 1;
+    }
+
+    // Prefer instance whose parent is allowed to run on instance's current node
+    node1 = pcmk__top_allowed_node(instance1, node1);
+    node2 = pcmk__top_allowed_node(instance2, node2);
+    if ((node1 == NULL) && (node2 == NULL)) {
+        crm_trace("No assignment preference for %s vs. %s: "
+                  "parent not allowed on either instance's current node",
+                  instance1->id, instance2->id);
+        return 0;
+
+    } else if (node1 == NULL) {
+        crm_trace("Assign %s after %s: parent not allowed on current node",
+                  instance1->id, instance2->id);
+        return 1;
+
+    } else if (node2 == NULL) {
+        crm_trace("Assign %s before %s: parent allowed on current node",
+                  instance1->id, instance2->id);
+        return -1;
+    }
+
+    // Prefer instance whose current node is running fewer other instances
+    if (node1->count < node2->count) {
+        crm_trace("Assign %s before %s: fewer active instances on current node",
+                  instance1->id, instance2->id);
+        return -1;
+
+    } else if (node1->count > node2->count) {
+        crm_trace("Assign %s after %s: more active instances on current node",
+                  instance1->id, instance2->id);
+        return 1;
+    }
+
+    // Prefer failed instance
+    can1 = did_fail(instance1);
+    can2 = did_fail(instance2);
+    if (!can1 && can2) {
+        crm_trace("Assign %s before %s: failed", instance1->id, instance2->id);
+        return -1;
+    } else if (can1 && !can2) {
+        crm_trace("Assign %s after %s: not failed",
+                  instance1->id, instance2->id);
+        return 1;
+    }
+
+    // Prefer instance with higher cumulative colocation score on current node
+    rc = cmp_instance_by_colocation(instance1, instance2);
+    if (rc != 0) {
+        return rc;
+    }
+
+    // Prefer instance with lower instance number
+    rc = pcmk__cmp_instance_number(instance1, instance2);
+    if (rc < 0) {
+        crm_trace("Assign %s before %s: instance number",
+                  instance1->id, instance2->id);
+    } else if (rc > 0) {
+        crm_trace("Assign %s after %s: instance number",
+                  instance1->id, instance2->id);
+    } else {
+        crm_trace("No assignment preference for %s vs. %s",
+                  instance1->id, instance2->id);
+    }
+    return rc;
+}
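+
+/* Illustrative usage (editor's sketch, not part of this patch): as with
+ * pcmk__cmp_instance_number(), this comparator matches GCompareFunc, so
+ * sorting instances into assignment order is a one-liner:
+ *
+ *     instances = g_list_sort(instances, pcmk__cmp_instance);
+ */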
diff --git a/lib/pacemaker/pcmk_sched_utilization.c b/lib/pacemaker/pcmk_sched_utilization.c
index 4a60d2253a..d9cb02ba2e 100644
--- a/lib/pacemaker/pcmk_sched_utilization.c
+++ b/lib/pacemaker/pcmk_sched_utilization.c
@@ -1,467 +1,465 @@
 /*
  * Copyright 2014-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Name for a pseudo-op to use in ordering constraints for utilization
 #define LOAD_STOPPED "load_stopped"
 
 /*!
  * \internal
  * \brief Get integer utilization from a string
  *
  * \param[in] s  String representation of a node utilization value
  *
  * \return Integer equivalent of \p s
  * \todo It would make sense to restrict utilization values to nonnegative
  *       integers, but the documentation just says "integers" and we didn't
  *       restrict them initially, so for backward compatibility, allow any
  *       integer.
  */
 static int
 utilization_value(const char *s)
 {
     int value = 0;
 
     if ((s != NULL) && (pcmk__scan_min_int(s, &value, INT_MIN) == EINVAL)) {
         pe_warn("Using 0 for utilization instead of invalid value '%s'", value);
         value = 0;
     }
     return value;
 }
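 
 /* For example (editor's note, not part of this patch): utilization_value("8")
  * yields 8, utilization_value(NULL) yields 0, and a non-integer string such
  * as "8GiB" is assumed to yield 0 after logging the warning above.
  */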
 
 
 /*
  * Functions for comparing node capacities
  */
 
 struct compare_data {
     const pe_node_t *node1;
     const pe_node_t *node2;
     bool node2_only;
     int result;
 };
 
 /*!
  * \internal
  * \brief Compare a single utilization attribute for two nodes
  *
  * Compare one utilization attribute for two nodes, incrementing the result if
  * the first node has greater capacity, and decrementing it if the second node
  * has greater capacity.
  *
  * \param[in] key        Utilization attribute name to compare
  * \param[in] value      Utilization attribute value to compare
  * \param[in] user_data  Comparison data (as struct compare_data*)
  */
 static void
 compare_utilization_value(gpointer key, gpointer value, gpointer user_data)
 {
     int node1_capacity = 0;
     int node2_capacity = 0;
     struct compare_data *data = user_data;
     const char *node2_value = NULL;
 
     if (data->node2_only) {
         if (g_hash_table_lookup(data->node1->details->utilization, key)) {
             return; // We've already compared this attribute
         }
     } else {
         node1_capacity = utilization_value((const char *) value);
     }
 
     node2_value = g_hash_table_lookup(data->node2->details->utilization, key);
     node2_capacity = utilization_value(node2_value);
 
     if (node1_capacity > node2_capacity) {
         data->result--;
     } else if (node1_capacity < node2_capacity) {
         data->result++;
     }
 }
 
 /*!
  * \internal
  * \brief Compare utilization capacities of two nodes
  *
  * \param[in] node1  First node to compare
  * \param[in] node2  Second node to compare
  *
  * \return Negative integer if node1 has more free capacity,
  *         0 if the capacities are equal, or a positive integer
  *         if node2 has more free capacity
  */
 int
 pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2)
 {
     struct compare_data data = {
         .node1      = node1,
         .node2      = node2,
         .node2_only = false,
         .result     = 0,
     };
 
     // Compare utilization values that node1 and maybe node2 have
     g_hash_table_foreach(node1->details->utilization, compare_utilization_value,
                          &data);
 
     // Compare utilization values that only node2 has
     data.node2_only = true;
     g_hash_table_foreach(node2->details->utilization, compare_utilization_value,
                          &data);
 
     return data.result;
 }
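 
 /* Illustrative usage (editor's sketch, not part of this patch): the result
  * follows strcmp() conventions, so choosing the freer of two nodes looks
  * like:
  *
  *     pe_node_t *freer = node1;
  *
  *     if (pcmk__compare_node_capacities(node1, node2) > 0) {
  *         freer = node2; // node2 has more free capacity
  *     }
  */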
 
 
 /*
  * Functions for updating node capacities
  */
 
 struct calculate_data {
     GHashTable *current_utilization;
     bool plus;
 };
 
 /*!
  * \internal
  * \brief Update a single utilization attribute with a new value
  *
  * \param[in] key        Name of utilization attribute to update
  * \param[in] value      Value to add or subtract
  * \param[in] user_data  Calculation data (as struct calculate_data *)
  */
 static void
 update_utilization_value(gpointer key, gpointer value, gpointer user_data)
 {
     int result = 0;
     const char *current = NULL;
     struct calculate_data *data = user_data;
 
     current = g_hash_table_lookup(data->current_utilization, key);
     if (data->plus) {
         result = utilization_value(current) + utilization_value(value);
     } else if (current) {
         result = utilization_value(current) - utilization_value(value);
     }
     g_hash_table_replace(data->current_utilization,
                          strdup(key), pcmk__itoa(result));
 }
 
 /*!
  * \internal
  * \brief Subtract a resource's utilization from node capacity
  *
  * \param[in] current_utilization  Current node utilization attributes
  * \param[in] rsc                  Resource with utilization to subtract
  */
 void
 pcmk__consume_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc)
 {
     struct calculate_data data = {
         .current_utilization = current_utilization,
         .plus = false,
     };
 
     g_hash_table_foreach(rsc->utilization, update_utilization_value, &data);
 }
 
 /*!
  * \internal
  * \brief Add a resource's utilization to node capacity
  *
  * \param[in] current_utilization  Current node utilization attributes
  * \param[in] rsc                  Resource with utilization to add
  */
 void
 pcmk__release_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc)
 {
     struct calculate_data data = {
         .current_utilization = current_utilization,
         .plus = true,
     };
 
     g_hash_table_foreach(rsc->utilization, update_utilization_value, &data);
 }
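 
 /* Illustrative pairing (editor's note, not part of this patch): these two
  * helpers are called symmetrically when a resource is assigned to or removed
  * from a node, as pcmk__assign_primitive() and pcmk__unassign_resource() do:
  *
  *     pcmk__consume_node_capacity(node->details->utilization, rsc); // assign
  *     pcmk__release_node_capacity(node->details->utilization, rsc); // undo
  */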
 
 
 /*
  * Functions for checking for sufficient node capacity
  */
 
 struct capacity_data {
     pe_node_t *node;
     const char *rsc_id;
     bool is_enough;
 };
 
 /*!
  * \internal
  * \brief Check whether a single utilization attribute has sufficient capacity
  *
  * \param[in] key        Name of utilization attribute to check
  * \param[in] value      Amount of utilization required
  * \param[in] user_data  Capacity data (as struct capacity_data *)
  */
 static void
 check_capacity(gpointer key, gpointer value, gpointer user_data)
 {
     int required = 0;
     int remaining = 0;
     const char *node_value_s = NULL;
     struct capacity_data *data = user_data;
 
     node_value_s = g_hash_table_lookup(data->node->details->utilization, key);
 
     required = utilization_value(value);
     remaining = utilization_value(node_value_s);
 
     if (required > remaining) {
         crm_debug("Remaining capacity for %s on %s (%d) is insufficient "
                   "for resource %s usage (%d)",
                   (const char *) key, data->node->details->uname, remaining,
                   data->rsc_id, required);
         data->is_enough = false;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node has sufficient capacity for a resource
  *
  * \param[in] node         Node to check
  * \param[in] rsc_id       ID of resource to check (for debug logs only)
  * \param[in] utilization  Required utilization amounts
  *
  * \return true if node has sufficient capacity for resource, otherwise false
  */
 static bool
 have_enough_capacity(pe_node_t *node, const char *rsc_id,
                      GHashTable *utilization)
 {
     struct capacity_data data = {
         .node = node,
         .rsc_id = rsc_id,
         .is_enough = true,
     };
 
     g_hash_table_foreach(utilization, check_capacity, &data);
     return data.is_enough;
 }
 
 /*!
  * \internal
  * \brief Sum the utilization requirements of a list of resources
  *
  * \param[in] orig_rsc  Resource being allocated (for logging purposes)
  * \param[in] rscs      Resources whose utilization should be summed
  *
  * \return Newly allocated hash table with sum of all utilization values
  * \note It is the caller's responsibility to free the return value using
  *       g_hash_table_destroy().
  */
 static GHashTable *
 sum_resource_utilization(pe_resource_t *orig_rsc, GList *rscs)
 {
     GHashTable *utilization = pcmk__strkey_table(free, free);
 
     for (GList *iter = rscs; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->add_utilization(rsc, orig_rsc, rscs, utilization);
     }
     return utilization;
 }
 
 /*!
  * \internal
  * \brief Ban resource from nodes with insufficient utilization capacity
  *
  * \param[in]     rsc       Resource to check
  * \param[in,out] prefer    Resource's preferred node (might be updated)
- * \param[in]     data_set  Cluster working set
  */
 void
-pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer,
-                                pe_working_set_t *data_set)
+pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer)
 {
     bool any_capable = false;
     char *rscs_id = NULL;
     pe_node_t *node = NULL;
     pe_node_t *most_capable_node = NULL;
     GList *colocated_rscs = NULL;
     GHashTable *unallocated_utilization = NULL;
     GHashTableIter iter;
 
-    CRM_CHECK((rsc != NULL) && (prefer != NULL) && (data_set != NULL), return);
+    CRM_CHECK((rsc != NULL) && (prefer != NULL), return);
 
     // The default placement strategy ignores utilization
-    if (pcmk__str_eq(data_set->placement_strategy, "default",
+    if (pcmk__str_eq(rsc->cluster->placement_strategy, "default",
                      pcmk__str_casei)) {
         return;
     }
 
     // Check whether any resources are colocated with this one
     colocated_rscs = rsc->cmds->colocated_resources(rsc, NULL, NULL);
     if (colocated_rscs == NULL) {
         return;
     }
 
     rscs_id = crm_strdup_printf("%s and its colocated resources", rsc->id);
 
     // If rsc isn't in the list, add it so we include its utilization
     if (g_list_find(colocated_rscs, rsc) == NULL) {
         colocated_rscs = g_list_append(colocated_rscs, rsc);
     }
 
     // Sum utilization of colocated resources that haven't been allocated yet
     unallocated_utilization = sum_resource_utilization(rsc, colocated_rscs);
 
     // Check whether any node has enough capacity for all the resources
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if (!pcmk__node_available(node) || (node->weight < 0)) {
             continue;
         }
 
         if (have_enough_capacity(node, rscs_id, unallocated_utilization)) {
             any_capable = true;
         }
 
         // Keep track of node with most free capacity
         if ((most_capable_node == NULL)
             || (pcmk__compare_node_capacities(node, most_capable_node) < 0)) {
             most_capable_node = node;
         }
     }
 
     if (any_capable) {
         // If so, ban resource from any node with insufficient capacity
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if ((node->weight >= 0) && pcmk__node_available(node)
                 && !have_enough_capacity(node, rscs_id,
                                          unallocated_utilization)) {
                 pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
                              node->details->uname, rscs_id);
                 resource_location(rsc, node, -INFINITY, "__limit_utilization__",
-                                  data_set);
+                                  rsc->cluster);
             }
         }
 
     } else {
         // Otherwise, ban from nodes with insufficient capacity for rsc alone
         if (*prefer == NULL) {
             *prefer = most_capable_node;
         }
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if ((node->weight >= 0) && pcmk__node_available(node)
                 && !have_enough_capacity(node, rsc->id, rsc->utilization)) {
                 pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
                              node->details->uname, rsc->id);
                 resource_location(rsc, node, -INFINITY, "__limit_utilization__",
-                                  data_set);
+                                  rsc->cluster);
             }
         }
     }
 
     g_hash_table_destroy(unallocated_utilization);
     g_list_free(colocated_rscs);
     free(rscs_id);
 
     pe__show_node_weights(true, rsc, "Post-utilization",
-                          rsc->allowed_nodes, data_set);
+                          rsc->allowed_nodes, rsc->cluster);
 }
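 
 /* Illustrative usage (editor's sketch, not part of this patch): a primitive
  * allocator would call this before picking a node, letting it both ban
  * under-provisioned nodes and suggest a preferred node:
  *
  *     pe_node_t *prefer = NULL;
  *
  *     pcmk__ban_insufficient_capacity(rsc, &prefer);
  *     // prefer may now point at the node with the most free capacity
  */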
 
 /*!
  * \internal
  * \brief Create a new load_stopped pseudo-op for a node
  *
  * \param[in] node      Node to create op for
  * \param[in] data_set  Cluster working set
  *
  * \return Newly created load_stopped op
  */
 static pe_action_t *
 new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
 {
     char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
                                                 node->details->uname);
     pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
 
     if (load_stopped->node == NULL) {
         load_stopped->node = pe__copy_node(node);
         pe__clear_action_flags(load_stopped, pe_action_optional);
     }
     free(load_stopped_task);
     return load_stopped;
 }
 
 /*!
  * \internal
  * \brief Create utilization-related internal constraints for a resource
  *
  * \param[in] rsc            Resource to create constraints for
  * \param[in] allowed_nodes  List of allowed next nodes for \p rsc
  */
 void
 pcmk__create_utilization_constraints(pe_resource_t *rsc, GList *allowed_nodes)
 {
     GList *iter = NULL;
     pe_node_t *node = NULL;
     pe_action_t *load_stopped = NULL;
 
     pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
                  rsc->id, rsc->cluster->placement_strategy);
 
     // "stop rsc then load_stopped" constraints for current nodes
     for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
         node = (pe_node_t *) iter->data;
         load_stopped = new_load_stopped_op(node, rsc->cluster);
         pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, NULL, load_stopped,
                            pe_order_load, rsc->cluster);
     }
 
     // "load_stopped then start/migrate_to rsc" constraints for allowed nodes
     for (GList *iter = allowed_nodes; iter; iter = iter->next) {
         node = (pe_node_t *) iter->data;
         load_stopped = new_load_stopped_op(node, rsc->cluster);
         pcmk__new_ordering(NULL, NULL, load_stopped, rsc, start_key(rsc), NULL,
                            pe_order_load, rsc->cluster);
         pcmk__new_ordering(NULL, NULL, load_stopped,
                            rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                            pe_order_load, rsc->cluster);
     }
 }
 
 /*!
  * \internal
  * \brief Output node capacities if enabled
  *
  * \param[in] desc      Prefix for output
  * \param[in] data_set  Cluster working set
  */
 void
 pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
         return;
     }
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         pcmk__output_t *out = data_set->priv;
 
         out->message(out, "node-capacity", node, desc);
     }
 }
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 883563df97..90922fd4e9 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2164 +1,2164 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm_resource.h>
 #include <crm/lrmd_internal.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/ipc_attrd_internal.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/output.h>
 #include <pacemaker-internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/cib/internal.h>
 
 #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
 
 enum rsc_command {
     cmd_none = 0,           // No command option given (yet)
     cmd_ban,
     cmd_cleanup,
     cmd_clear,
     cmd_colocations,
     cmd_colocations_deep,
     cmd_cts,
     cmd_delete,
     cmd_delete_param,
     cmd_digests,
     cmd_execute_agent,
     cmd_fail,
     cmd_get_param,
     cmd_get_property,
     cmd_list_active_ops,
     cmd_list_agents,
     cmd_list_all_ops,
     cmd_list_alternatives,
     cmd_list_instances,
     cmd_list_providers,
     cmd_list_resources,
     cmd_list_standards,
     cmd_locate,
     cmd_metadata,
     cmd_move,
     cmd_query_raw_xml,
     cmd_query_xml,
     cmd_refresh,
     cmd_restart,
     cmd_set_param,
     cmd_set_property,
     cmd_wait,
     cmd_why,
 };
 
 struct {
     enum rsc_command rsc_cmd;     // crm_resource command to perform
 
     // Infrastructure that given command needs to work
     gboolean require_cib;         // Whether command requires CIB IPC
     int cib_options;              // Options to use with CIB IPC calls
     gboolean require_crmd;        // Whether command requires controller IPC
     gboolean require_dataset;     // Whether command requires populated data set
     gboolean require_resource;    // Whether command requires resource specified
     gboolean require_node;        // Whether command requires node specified
     int find_flags;               // Flags to use when searching for resource
 
     // Command-line option values
     gchar *rsc_id;                // Value of --resource
     gchar *rsc_type;              // Value of --resource-type
     gboolean force;               // --force was given
     gboolean clear_expired;       // --expired was given
     gboolean recursive;           // --recursive was given
     gboolean promoted_role_only;  // --promoted was given
     gchar *host_uname;            // Value of --node
     gchar *interval_spec;         // Value of --interval
     gchar *move_lifetime;         // Value of --lifetime
     gchar *operation;             // Value of --operation
     const char *attr_set_type;    // Instance, meta, or utilization attribute
     gchar *prop_id;               // --nvpair (attribute XML ID)
     char *prop_name;              // Attribute name
     gchar *prop_set;              // --set-name (attribute block XML ID)
     gchar *prop_value;            // --parameter-value (attribute value)
     int timeout_ms;               // Parsed from --timeout value
     char *agent_spec;             // Standard and/or provider and/or agent
     gchar *xml_file;              // Value of (deprecated) --xml-file
     int check_level;              // Optional value of --validate or --force-check
 
     // Resource configuration specified via command-line arguments
     gboolean cmdline_config;      // Resource configuration was via arguments
     char *v_agent;                // Value of --agent
     char *v_class;                // Value of --class
     char *v_provider;             // Value of --provider
     GHashTable *cmdline_params;   // Resource parameters specified
 
     // Positional command-line arguments
     gchar **remainder;            // Positional arguments as given
     GHashTable *override_params;  // Resource parameter values that override config
 } options = {
     .attr_set_type = XML_TAG_ATTR_SETS,
     .check_level = -1,
     .cib_options = cib_sync_call,
     .require_cib = TRUE,
     .require_dataset = TRUE,
     .require_resource = TRUE,
 };
 
 #if 0
 // @COMPAT @TODO enable this at next backward compatibility break
 #define SET_COMMAND(cmd) do {                                               \
         if (options.rsc_cmd != cmd_none) {                                  \
             g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,             \
                         "Only one command option may be specified");        \
             return FALSE;                                                   \
         }                                                                   \
         options.rsc_cmd = (cmd);                                            \
     } while (0)
 #else
 #define SET_COMMAND(cmd) do {                                               \
         if (options.rsc_cmd != cmd_none) {                                  \
             reset_options();                                                \
         }                                                                   \
         options.rsc_cmd = (cmd);                                            \
     } while (0)
 #endif
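 
 /* Illustrative usage (editor's sketch, not part of this patch): each
  * command-option callback boils down to a SET_COMMAND() call, so a handler
  * such as fail_cb() below might look like:
  *
  *     gboolean
  *     fail_cb(const gchar *option_name, const gchar *optarg, gpointer data,
  *             GError **error)
  *     {
  *         SET_COMMAND(cmd_fail);
  *         return TRUE;
  *     }
  */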
 
 gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
                         gpointer data, GError **error);
 gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error);
 gboolean option_cb(const gchar *option_name, const gchar *optarg,
                    gpointer data, GError **error);
 gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean restart_cb(const gchar *option_name, const gchar *optarg,
                     gpointer data, GError **error);
 gboolean digests_cb(const gchar *option_name, const gchar *optarg,
                     gpointer data, GError **error);
 gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 
 static crm_exit_t exit_code = CRM_EX_OK;
 static pcmk__output_t *out = NULL;
 static pcmk__common_args_t *args = NULL;
 
 // Things that should be cleaned up on exit
 static GError *error = NULL;
 static GMainLoop *mainloop = NULL;
 static cib_t *cib_conn = NULL;
 static pcmk_ipc_api_t *controld_api = NULL;
 static pe_working_set_t *data_set = NULL;
 
 #define MESSAGE_TIMEOUT_S 60
 
 #define INDENT "                                    "
 
 static pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 // Clean up and exit
 static crm_exit_t
 bye(crm_exit_t ec)
 {
     pcmk__output_and_clear_error(error, out);
 
     if (out != NULL) {
         out->finish(out, ec, true, NULL);
         pcmk__output_free(out);
     }
 
     if (cib_conn != NULL) {
         cib_t *save_cib_conn = cib_conn;
 
         cib_conn = NULL; // Ensure we can't free this twice
         cib__clean_up_connection(&save_cib_conn);
     }
 
     if (controld_api != NULL) {
         pcmk_ipc_api_t *save_controld_api = controld_api;
 
         controld_api = NULL; // Ensure we can't free this twice
         pcmk_free_ipc_api(save_controld_api);
     }
 
     if (mainloop != NULL) {
         g_main_loop_unref(mainloop);
         mainloop = NULL;
     }
 
     pe_free_working_set(data_set);
     data_set = NULL;
     crm_exit(ec);
     return ec;
 }
 
 static void
 quit_main_loop(crm_exit_t ec)
 {
     exit_code = ec;
     if (mainloop != NULL) {
         GMainLoop *mloop = mainloop;
 
         mainloop = NULL; // Don't re-enter this block
         pcmk_quit_main_loop(mloop, 10);
         g_main_loop_unref(mloop);
     }
 }
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
     // Start with newline because "Waiting for ..." message doesn't have one
     if (error != NULL) {
         g_clear_error(&error);
     }
 
     g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
                 "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S);
 
     quit_main_loop(CRM_EX_TIMEOUT);
     return FALSE;
 }
 
 static void
 controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
                           crm_exit_t status, void *event_data, void *user_data)
 {
     switch (event_type) {
         case pcmk_ipc_event_disconnect:
             if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
                 crm_info("Connection to controller was terminated");
             }
             quit_main_loop(exit_code);
             break;
 
         case pcmk_ipc_event_reply:
             if (status != CRM_EX_OK) {
                 out->err(out, "Error: bad reply from controller: %s",
                          crm_exit_str(status));
                 pcmk_disconnect_ipc(api);
                 quit_main_loop(status);
             } else {
                 if ((pcmk_controld_api_replies_expected(api) == 0)
                     && mainloop && g_main_loop_is_running(mainloop)) {
                     out->info(out, "... got reply (done)");
                     crm_debug("Got all the replies we expected");
                     pcmk_disconnect_ipc(api);
                     quit_main_loop(CRM_EX_OK);
                 } else {
                     out->info(out, "... got reply");
                 }
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 start_mainloop(pcmk_ipc_api_t *capi)
 {
     unsigned int count = pcmk_controld_api_replies_expected(capi);
 
     if (count > 0) {
         out->info(out, "Waiting for %u %s from the controller",
                   count, pcmk__plural_alt(count, "reply", "replies"));
         exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
         mainloop = g_main_loop_new(NULL, FALSE);
         g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
         g_main_loop_run(mainloop);
     }
 }
 
 static int
 compare_id(gconstpointer a, gconstpointer b)
 {
     return strcmp((const char *)a, (const char *)b);
 }
 
 static GList *
 build_constraint_list(xmlNode *root)
 {
     GList *retval = NULL;
     xmlNode *cib_constraints = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
     int ndx = 0;
 
     cib_constraints = pcmk_find_cib_element(root, XML_CIB_TAG_CONSTRAINTS);
     xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
 
     for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
         xmlNode *match = getXpathResult(xpathObj, ndx);
         retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
     }
 
     freeXpathObject(xpathObj);
     return retval;
 }
 
 /* short option letters still available: eEJkKXyYZ */
 
 static GOptionEntry query_entries[] = {
     { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all cluster resources with status",
       NULL },
     { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List IDs of all instantiated resources (individual members\n"
       INDENT "rather than groups etc.)",
       NULL },
     { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       NULL,
       NULL },
     { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List active resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_standards_cb,
       "List supported standards",
       NULL },
     { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_providers_cb,
       "List all available OCF providers",
       NULL },
     { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_agents_cb,
       "List all agents available for the named standard and/or provider",
       "STD:PROV" },
     { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_alternatives_cb,
       "List all available providers for the named OCF agent",
       "AGENT" },
     { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       metadata_cb,
       "Show the metadata for the named class:provider:agent",
       "SPEC" },
     { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (after any template expansion)",
       NULL },
     { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (before any template expansion)",
       NULL },
     { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named parameter for resource (use instance attribute\n"
       INDENT "unless --meta or --utilization is specified)",
       "PARAM" },
     { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named property of resource ('class', 'type', or 'provider') "
       "(requires --resource)",
       "PROPERTY" },
     { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show node(s) currently running resource",
       NULL },
     { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource\n"
       INDENT "and the resources is it colocated with",
       NULL },
     { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource",
       NULL },
     { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
       "Show why resources are not running, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry command_entries[] = {
     { "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "Validate resource configuration by calling agent's validate-all\n"
       INDENT "action. The configuration may be specified either by giving an\n"
       INDENT "existing resource name with -r, or by specifying --class,\n"
       INDENT "--agent, and --provider arguments, along with any number of\n"
       INDENT "--option arguments. An optional LEVEL argument can be given\n"
       INDENT "to control the level of checking performed.",
       "LEVEL" },
     { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "If resource has any past failures, clear its history and fail\n"
       INDENT "count. Optionally filtered by --resource, --node, --operation\n"
       INDENT "and --interval (otherwise all). --operation and --interval\n"
       INDENT "apply to fail counts, but entire history is always clear, to\n"
       INDENT "allow current state to be rechecked. If the named resource is\n"
       INDENT "part of a group, or one numbered instance of a clone or bundled\n"
       INDENT "resource, the clean-up applies to the whole collective resource\n"
       INDENT "unless --force is given.",
       NULL },
     { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "Delete resource's history (including failures) so its current state\n"
       INDENT "is rechecked. Optionally filtered by --resource and --node\n"
       INDENT "(otherwise all). If the named resource is part of a group, or one\n"
       INDENT "numbered instance of a clone or bundled resource, the refresh\n"
       INDENT "applies to the whole collective resource unless --force is given.",
       NULL },
     { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Set named parameter for resource (requires -v). Use instance\n"
       INDENT "attribute unless --meta or --utilization is specified.",
       "PARAM" },
     { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Delete named parameter for resource. Use instance attribute\n"
       INDENT "unless --meta or --utilization is specified.",
       "PARAM" },
     { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
       "Set named property of resource ('class', 'type', or 'provider') "
       "(requires -r, -t, -v)",
       "PROPERTY" },
 
     { NULL }
 };
 
 static GOptionEntry location_entries[] = {
     { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to move resource. If --node is specified,\n"
       INDENT "the constraint will be to move to that node, otherwise it\n"
       INDENT "will be to ban the current node. Unless --force is specified\n"
       INDENT "this will return an error if the resource is already running\n"
       INDENT "on the specified node. If --force is specified, this will\n"
       INDENT "always ban the current node.\n"
       INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
       INDENT "resource from running on its previous location until the\n"
       INDENT "implicit constraint expires or is removed with --clear.",
       NULL },
     { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to keep resource off a node.\n"
       INDENT "Optional: --node, --lifetime, --promoted.\n"
       INDENT "NOTE: This will prevent the resource from running on the\n"
       INDENT "affected node until the implicit constraint expires or is\n"
       INDENT "removed with --clear. If --node is not specified, it defaults\n"
       INDENT "to the node currently running the resource for primitives\n"
       INDENT "and groups, or the promoted instance of promotable clones with\n"
       INDENT "promoted-max=1 (all other situations result in an error as\n"
       INDENT "there is no sane default).",
       NULL },
     { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Remove all constraints created by the --ban and/or --move\n"
       INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
       INDENT "--expired. If --node is not specified, all constraints created\n"
       INDENT "by --ban and --move will be removed for the named resource. If\n"
       INDENT "--node and --force are specified, any constraint created by\n"
       INDENT "--move will be cleared, even if it is not for the specified\n"
       INDENT "node. If --expired is specified, only those constraints whose\n"
       INDENT "lifetimes have expired will be removed.",
       NULL },
     { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
       "Modifies the --clear argument to remove constraints with\n"
       INDENT "expired lifetimes.",
       NULL },
     { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
       "Lifespan (as ISO 8601 duration) of created constraints (with\n"
       INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
       "TIMESPEC" },
     { "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
       &options.promoted_role_only,
       "Limit scope of command to promoted role (with -B, -M, -U). For\n"
       INDENT "-B and -M, previously promoted instances may remain\n"
       INDENT "active in the unpromoted role.",
       NULL },
 
     // Deprecated since 2.1.0
     { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
       &options.promoted_role_only,
       "Deprecated: Use --promoted instead", NULL },
 
     { NULL }
 };
 
 static GOptionEntry advanced_entries[] = {
     { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
       "(Advanced) Delete a resource from the CIB. Required: -t",
       NULL },
     { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
       "(Advanced) Tell the cluster this resource has failed",
       NULL },
     { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
       "(Advanced) Tell the cluster to restart this resource and\n"
       INDENT "anything that depends on it",
       NULL },
     { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
       "(Advanced) Wait until the cluster settles into a stable state",
       NULL },
     { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb,
       "(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
       INDENT "configuration changes (only accurate if there is resource\n"
       INDENT "history on the specified node). Required: --resource, --node.\n"
       INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
       INDENT "the configuration (to see what the hash would be with those\n"
       INDENT "changes).",
       NULL },
     { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and demote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and stop a resource on the local node",
       NULL },
     { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and start a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and promote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and check the state of a resource on\n"
       INDENT "the local node. An optional LEVEL argument can be given\n"
       INDENT "to control the level of checking performed.",
       "LEVEL" },
 
     { NULL }
 };
 
 static GOptionEntry addl_entries[] = {
     { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
       "Node name",
       "NAME" },
     { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
       "Follow colocation chains when using --set-parameter",
       NULL },
     { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
       "Resource XML element (primitive, group, etc.) (with -D)",
       "ELEMENT" },
     { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
       "Value to use with -p",
       "PARAM" },
     { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource meta-attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource utilization attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
       "Operation to clear instead of all (with -C -r)",
       "OPERATION" },
     { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
       "Interval of operation to clear (default 0) (with -C -r -n)",
       "N" },
     { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
       "The standard the resource agent conforms to (for example, ocf).\n"
       INDENT "Use with --agent, --provider, --option, and --validate.",
       "CLASS" },
     { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The agent to use (for example, IPaddr). Use with --class,\n"
       INDENT "--provider, --option, and --validate.",
       "AGENT" },
     { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The vendor that supplies the resource agent (for example,\n"
       INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
       "PROVIDER" },
     { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
       "Specify a device configuration parameter as NAME=VALUE (may be\n"
       INDENT "specified multiple times). Use with --validate and without the\n"
       INDENT "-r option.",
       "PARAM" },
     { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
       "(Advanced) XML ID of attributes element to use (with -p, -d)",
       "ID" },
     { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
       "(Advanced) XML ID of nvpair element to use (with -p, -d)",
       "ID" },
     { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
       "(Advanced) Abort if command does not finish in this time (with\n"
       INDENT "--restart, --wait, --force-*)",
       "N" },
     { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
       "If making CIB changes, do so regardless of quorum. See help for\n"
       INDENT "individual commands for additional behavior.",
       NULL },
     { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
       NULL,
       "FILE" },
     { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
       NULL,
       "HOST" },
 
     { NULL }
 };
 
 static void
 reset_options(void) {
     options.require_crmd = FALSE;
     options.require_node = FALSE;
 
     options.require_cib = TRUE;
     options.require_dataset = TRUE;
     options.require_resource = TRUE;
 
     options.find_flags = 0;
 }
 
 gboolean
 agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.cmdline_config = TRUE;
     options.require_resource = FALSE;
 
     if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
         pcmk__str_update(&options.v_provider, optarg);
     } else {
         pcmk__str_update(&options.v_agent, optarg);
     }
 
     return TRUE;
 }
 
 gboolean
 attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
         options.attr_set_type = XML_TAG_META_SETS;
     } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
         options.attr_set_type = XML_TAG_UTILIZATION;
     }
 
     return TRUE;
 }
 
 gboolean
 class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     pcmk__str_update(&options.v_class, optarg);
     options.cmdline_config = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
         SET_COMMAND(cmd_cleanup);
     } else {
         SET_COMMAND(cmd_refresh);
     }
 
     options.require_resource = FALSE;
     if (getenv("CIB_file") == NULL) {
         options.require_crmd = TRUE;
     }
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_delete);
     options.require_dataset = FALSE;
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.clear_expired = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 static void
 get_agent_spec(const gchar *optarg)
 {
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     pcmk__str_update(&options.agent_spec, optarg);
 }
 
 gboolean
 list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                GError **error)
 {
     SET_COMMAND(cmd_list_agents);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_providers);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_standards);
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_list_alternatives);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
             GError **error)
 {
     SET_COMMAND(cmd_metadata);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
           GError **error)
 {
     char *name = NULL;
     char *value = NULL;
 
     if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
         return FALSE;
     }
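     // Lazily create the table of NAME=VALUE parameters given with --option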
     if (options.cmdline_params == NULL) {
         options.cmdline_params = pcmk__strkey_table(free, free);
     }
     g_hash_table_replace(options.cmdline_params, name, value);
     return TRUE;
 }
 
 gboolean
 fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_fail);
     options.require_crmd = TRUE;
     options.require_node = TRUE;
     return TRUE;
 }
 
 gboolean
 flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
         SET_COMMAND(cmd_clear);
         options.find_flags = pe_find_renamed|pe_find_anon;
     } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
         SET_COMMAND(cmd_ban);
         options.find_flags = pe_find_renamed|pe_find_anon;
     } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
         SET_COMMAND(cmd_move);
         options.find_flags = pe_find_renamed|pe_find_anon;
     } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
         SET_COMMAND(cmd_query_xml);
         options.find_flags = pe_find_renamed|pe_find_any;
     } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
         SET_COMMAND(cmd_query_raw_xml);
         options.find_flags = pe_find_renamed|pe_find_any;
     } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
         SET_COMMAND(cmd_locate);
         options.find_flags = pe_find_renamed|pe_find_anon;
     } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
         SET_COMMAND(cmd_colocations_deep);
         options.find_flags = pe_find_renamed|pe_find_anon;
     } else {
         SET_COMMAND(cmd_colocations);
         options.find_flags = pe_find_renamed|pe_find_anon;
     }
 
     return TRUE;
 }
 
 gboolean
 get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
         SET_COMMAND(cmd_get_param);
     } else {
         SET_COMMAND(cmd_get_property);
     }
 
     pcmk__str_update(&options.prop_name, optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
         SET_COMMAND(cmd_cts);
     } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
         SET_COMMAND(cmd_list_resources);
     } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
         SET_COMMAND(cmd_list_instances);
     } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
         SET_COMMAND(cmd_list_active_ops);
     } else {
         SET_COMMAND(cmd_list_all_ops);
     }
 
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
         SET_COMMAND(cmd_set_param);
     } else {
         SET_COMMAND(cmd_delete_param);
     }
 
     pcmk__str_update(&options.prop_name, optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_set_property);
     options.require_dataset = FALSE;
     pcmk__str_update(&options.prop_name, optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.timeout_ms = crm_get_msec(optarg);
     return TRUE;
 }
 
 gboolean
 validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_execute_agent);
     if (options.operation) {
         g_free(options.operation);
     }
     options.operation = g_strdup(option_name + 2); // skip "--"
     options.find_flags = pe_find_renamed|pe_find_anon;
     if (options.override_params == NULL) {
         options.override_params = pcmk__strkey_table(free, free);
     }
 
     if (optarg != NULL) {
         if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) {
             g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
                         "Invalid check level setting: %s", optarg);
             return FALSE;
         }
     }
 
     return TRUE;
 }
 
 gboolean
 restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
            GError **error)
 {
     SET_COMMAND(cmd_restart);
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 digests_cb(const gchar *option_name, const gchar *optarg, gpointer data,
            GError **error)
 {
     SET_COMMAND(cmd_digests);
     options.find_flags = pe_find_renamed|pe_find_anon;
     if (options.override_params == NULL) {
         options.override_params = pcmk__strkey_table(free, free);
     }
     options.require_node = TRUE;
     options.require_dataset = TRUE;
     return TRUE;
 }
 
 gboolean
 wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_wait);
     options.require_resource = FALSE;
     options.require_dataset = FALSE;
     return TRUE;
 }
 
 gboolean
 why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_why);
     options.require_resource = FALSE;
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 static int
 ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
 {
     int rc = pcmk_rc_ok;
     pe_node_t *current = NULL;
     unsigned int nactive = 0;
 
     CRM_CHECK(rsc != NULL, return EINVAL);
 
     current = pe__find_active_requires(rsc, &nactive);
 
     if (nactive == 1) {
         rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                               cib_conn, options.cib_options, options.promoted_role_only);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int count = 0;
         GList *iter = NULL;
 
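         /* For a promotable clone, we can still pick a node implicitly if
          * exactly one instance is currently in the promoted role
          */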
         current = NULL;
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if (child_role == RSC_ROLE_PROMOTED) {
                 count++;
                 current = pe__current_node(child);
             }
         }
 
         if(count == 1 && current) {
             rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                                   cib_conn, options.cib_options, options.promoted_role_only);
 
         } else {
             rc = EINVAL;
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "Resource '%s' not moved: active in %d locations (promoted in %d).\n"
                         "To prevent '%s' from running on a specific location, "
                         "specify a node."
                         "To prevent '%s' from being promoted at a specific "
                         "location, specify a node and the --promoted option.",
                         options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
         }
 
     } else {
         rc = EINVAL;
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Resource '%s' not moved: active in %d locations.\n"
                     "To prevent '%s' from running on a specific location, "
                     "specify a node.",
                     options.rsc_id, nactive, options.rsc_id);
     }
 
     return rc;
 }
 
 static void
 cleanup(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
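     /* Without --force, apply the clean-up to the whole collective resource
      * (group, clone, or bundle) containing the requested resource
      */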
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Erasing failures of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation,
                              options.interval_spec, TRUE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
 {
     GList *before = NULL;
     GList *after = NULL;
     GList *remaining = NULL;
     GList *ele = NULL;
     pe_node_t *dest = NULL;
     int rc = pcmk_rc_ok;
 
     if (!out->is_quiet(out)) {
         before = build_constraint_list(data_set->input);
     }
 
     if (options.clear_expired) {
         rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
                                             options.rsc_id, options.host_uname,
                                             options.promoted_role_only);
 
     } else if (options.host_uname) {
         dest = pe_find_node(data_set->nodes, options.host_uname);
         if (dest == NULL) {
             rc = pcmk_rc_node_unknown;
             if (!out->is_quiet(out)) {
                 g_list_free(before);
             }
             return rc;
         }
         rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
                                 cib_conn, options.cib_options, TRUE, options.force);
 
     } else {
         rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
                                 cib_conn, options.cib_options, TRUE, options.force);
     }
 
     if (!out->is_quiet(out)) {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Could not get modified CIB: %s\n", pcmk_strerror(rc));
             g_list_free(before);
             free_xml(*cib_xml_copy);
             *cib_xml_copy = NULL;
             return rc;
         }
 
         data_set->input = *cib_xml_copy;
         cluster_status(data_set);
 
         after = build_constraint_list(data_set->input);
         remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
 
         for (ele = remaining; ele != NULL; ele = ele->next) {
             out->info(out, "Removing constraint: %s", (char *) ele->data);
         }
 
         g_list_free(before);
         g_list_free(after);
         g_list_free(remaining);
     }
 
     return rc;
 }
 
 static int
 delete(void)
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (options.rsc_type == NULL) {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "You need to specify a resource type with -t");
         return rc;
     }
 
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
 
     rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
     return rc;
 }
 
 static int
 list_agents(pcmk__output_t *out, const char *agent_spec)
 {
     int rc = pcmk_rc_ok;
     char *provider = strchr(agent_spec, ':');
     lrmd_t *lrmd_conn = NULL;
     lrmd_list_t *list = NULL;
 
     rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
     if (rc != pcmk_rc_ok) {
         goto error;
     }
 
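     // Split a "STANDARD:PROVIDER" spec in place at the colon, if present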
     if (provider) {
         *provider++ = 0;
     }
 
     rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
 
     if (rc > 0) {
         rc = out->message(out, "agents-list", list, agent_spec, provider);
     } else {
         rc = pcmk_rc_error;
     }
 
 error:
     if (rc != pcmk_rc_ok) {
         if (provider == NULL) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "No agents found for standard '%s'", agent_spec);
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "No agents found for standard '%s' and provider '%s'",
                         agent_spec, provider);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 list_providers(pcmk__output_t *out, const char *agent_spec)
 {
     int rc;
     const char *text = NULL;
     lrmd_t *lrmd_conn = NULL;
     lrmd_list_t *list = NULL;
 
     rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
     if (rc != pcmk_rc_ok) {
         goto error;
     }
 
     switch (options.rsc_cmd) {
         case cmd_list_alternatives:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "alternatives-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         case cmd_list_standards:
             rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "standards-list", list);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "standards";
             break;
         case cmd_list_providers:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "providers-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         default:
             g_set_error(&error, PCMK__RC_ERROR, pcmk_rc_error, "Bug");
             lrmd_api_delete(lrmd_conn);
             return pcmk_rc_error;
     }
 
 error:
     if (rc != pcmk_rc_ok) {
         if (agent_spec != NULL) {
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "No %s found for %s", text, agent_spec);
 
         } else {
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "No %s found", text);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 populate_working_set(xmlNodePtr *cib_xml_copy)
 {
     int rc = pcmk_rc_ok;
 
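     // Source the CIB from the file given with the hidden --xml-file option,
     // if any; otherwise query the live cluster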
     if (options.xml_file != NULL) {
         *cib_xml_copy = filename2xml(options.xml_file);
         if (*cib_xml_copy == NULL) {
             rc = pcmk_rc_cib_corrupt;
         }
     } else {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
     }
 
     if (rc == pcmk_rc_ok) {
         data_set = pe_new_working_set();
         if (data_set == NULL) {
             rc = ENOMEM;
         } else {
             pe__set_working_set_flags(data_set,
                                       pe_flag_no_counts|pe_flag_no_compat);
             data_set->priv = out;
             rc = update_working_set_xml(data_set, cib_xml_copy);
         }
     }
 
     if (rc != pcmk_rc_ok) {
         free_xml(*cib_xml_copy);
         *cib_xml_copy = NULL;
         return rc;
     }
 
     cluster_status(data_set);
     return pcmk_rc_ok;
 }
 
 static int
 refresh(pcmk__output_t *out)
 {
     int rc = pcmk_rc_ok;
     const char *router_node = options.host_uname;
     int attr_options = pcmk__node_attr_none;
 
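     /* Requests for a Pacemaker Remote node must be routed through the
      * cluster node currently hosting its connection resource
      */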
     if (options.host_uname) {
         pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
 
         if (pe__is_guest_or_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 rc = ENXIO;
                 g_set_error(&error, PCMK__RC_ERROR, rc,
                             "No cluster connection to Pacemaker Remote node %s detected",
                             options.host_uname);
                 return rc;
             }
             router_node = node->details->uname;
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   options.host_uname? options.host_uname : "all nodes");
         rc = pcmk_rc_ok;
         return rc;
     }
 
     crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
 
     rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
                                         NULL, NULL, NULL, attr_options);
 
     if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
                                   router_node) == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 
     return rc;
 }
 
 static void
 refresh_resource(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Re-checking the state of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
                              FALSE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 set_property(void)
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (pcmk__str_empty(options.rsc_type)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must specify -t with resource type");
         rc = ENXIO;
         return rc;
 
     } else if (pcmk__str_empty(options.prop_value)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must supply -v with new value");
         rc = ENXIO;
         return rc;
     }
 
     CRM_LOG_ASSERT(options.prop_name != NULL);
 
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
     crm_xml_add(msg_data, options.prop_name, options.prop_value);
 
     rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
 
     return rc;
 }
 
 static int
 show_metadata(pcmk__output_t *out, const char *agent_spec)
 {
     int rc = pcmk_rc_ok;
     char *standard = NULL;
     char *provider = NULL;
     char *type = NULL;
     char *metadata = NULL;
     lrmd_t *lrmd_conn = NULL;
 
     rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
     if (rc != pcmk_rc_ok) {
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "Could not create executor connection");
         lrmd_api_delete(lrmd_conn);
         return rc;
     }
 
     rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
     rc = pcmk_legacy2rc(rc);
 
     if (rc == pcmk_rc_ok) {
         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                            provider, type,
                                            &metadata, 0);
         rc = pcmk_legacy2rc(rc);
 
         if (metadata) {
             out->output_xml(out, "metadata", metadata);
             free(metadata);
         } else {
             /* We were given a validly formatted spec, but it doesn't necessarily
              * match up with anything that exists.  Use ENXIO as the return code
              * here because that maps to an exit code of CRM_EX_NOSUCH, which
              * probably is the most common reason to get here.
              */
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         _("Metadata query for %s failed: %s"),
                         agent_spec, pcmk_rc_str(rc));
         }
     } else {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     _("'%s' is not a valid agent specification"), agent_spec);
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static void
 validate_cmdline_config(void)
 {
     // Cannot use both --resource and command-line resource configuration
     if (options.rsc_id != NULL) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--resource cannot be used with --class, --agent, and --provider");
 
     // Not all commands support command-line resource configuration
     } else if (options.rsc_cmd != cmd_execute_agent) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--class, --agent, and --provider can only be used with "
                     "--validate and --force-*");
 
     // Not all of --class, --agent, and --provider need to be given.  Not all
     // classes support the concept of a provider.  Check that what we were given
     // is valid.
     } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
         if (options.v_provider != NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "stonith does not support providers");
 
         } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "%s is not a known stonith agent", options.v_agent ? options.v_agent : "");
         }
 
     } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "%s:%s:%s is not a known resource",
                     options.v_class ? options.v_class : "",
                     options.v_provider ? options.v_provider : "",
                     options.v_agent ? options.v_agent : "");
     }
 
     if (error != NULL) {
         return;
     }
 
     if (options.cmdline_params == NULL) {
         options.cmdline_params = pcmk__strkey_table(free, free);
     }
     options.require_resource = FALSE;
     options.require_dataset = FALSE;
     options.require_cib = FALSE;
 }
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
         { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
           "Resource ID",
           "ID" },
         { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
           NULL,
           NULL },
 
         { NULL }
     };
 
     const char *description = "Examples:\n\n"
                               "List the available OCF agents:\n\n"
                               "\t# crm_resource --list-agents ocf\n\n"
                               "List the available OCF agents from the linux-ha project:\n\n"
                               "\t# crm_resource --list-agents ocf:heartbeat\n\n"
                               "Move 'myResource' to a specific node:\n\n"
                               "\t# crm_resource --resource myResource --move --node altNode\n\n"
                               "Allow (but not force) 'myResource' to move back to its original "
                               "location:\n\n"
                               "\t# crm_resource --resource myResource --clear\n\n"
                               "Stop 'myResource' (and anything that depends on it):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter target-role "
                               "--meta --parameter-value Stopped\n\n"
                               "Tell the cluster not to manage 'myResource' (the cluster will not "
                               "attempt to start or stop the\n"
                               "resource under any circumstances; useful when performing maintenance "
                               "tasks on a resource):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter is-managed "
                               "--meta --parameter-value false\n\n"
                               "Erase the operation history of 'myResource' on 'aNode' (the cluster "
                               "will 'forget' the existing\n"
                               "resource state, including any errors, and attempt to recover the"
                               "resource; useful when a resource\n"
                               "had failed permanently and has been repaired by an administrator):\n\n"
                               "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     /* Add the -Q option, which cannot be part of the globally supported options
      * because some tools use that flag for something else.
      */
     pcmk__add_main_args(context, extra_prog_entries);
 
     pcmk__add_arg_group(context, "queries", "Queries:",
                         "Show query help", query_entries);
     pcmk__add_arg_group(context, "commands", "Commands:",
                         "Show command help", command_entries);
     pcmk__add_arg_group(context, "locations", "Locations:",
                         "Show location help", location_entries);
     pcmk__add_arg_group(context, "advanced", "Advanced:",
                         "Show advanced option help", advanced_entries);
     pcmk__add_arg_group(context, "additional", "Additional Options:",
                         "Show additional options", addl_entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     xmlNode *cib_xml_copy = NULL;
     pe_resource_t *rsc = NULL;
     pe_node_t *node = NULL;
     int rc = pcmk_rc_ok;
 
     GOptionGroup *output_group = NULL;
     gchar **processed_args = NULL;
     GOptionContext *context = NULL;
 
     /*
      * Parse command line arguments
      */
 
     args = pcmk__new_common_args(SUMMARY);
     processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
     context = build_arg_context(args, &output_group);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     pcmk__cli_init_logging("crm_resource", args->verbosity);
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     pe__register_messages(out);
     crm_resource_register_messages(out);
     lrmd__register_messages(out);
     pcmk__register_lib_messages(out);
 
     out->quiet = args->quiet;
 
     crm_log_args(argc, argv);
 
     /*
      * Validate option combinations
      */
 
     // If the user didn't explicitly specify a command, list resources
     if (options.rsc_cmd == cmd_none) {
         options.rsc_cmd = cmd_list_resources;
         options.require_resource = FALSE;
     }
 
     // --expired without --clear/-U doesn't make sense
     if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "--expired requires --clear or -U");
         goto done;
     }
 
     if ((options.remainder != NULL) && (options.override_params != NULL)) {
         // Commands that use positional arguments will create override_params
         for (gchar **s = options.remainder; *s; s++) {
             // Allow room for the terminating NUL that sscanf() will write
             char *name = calloc(1, strlen(*s) + 1);
             char *value = calloc(1, strlen(*s) + 1);
             int rc = sscanf(*s, "%[^=]=%s", name, value);
 
             if (rc == 2) {
                 g_hash_table_replace(options.override_params, name, value);
 
             } else {
                 exit_code = CRM_EX_USAGE;
                 g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                             "Error parsing '%s' as a name=value pair",
                             *s);
                 free(value);
                 free(name);
                 goto done;
             }
         }
 
     } else if (options.remainder != NULL) {
         gchar **strv = NULL;
         gchar *msg = NULL;
         int i = 1;
         int len = 0;
 
         for (gchar **s = options.remainder; *s; s++) {
             len++;
         }
 
         CRM_ASSERT(len > 0);
 
         /* Allow room for the header, one entry per remaining argument, and
          * the NULL terminator that g_strjoinv() requires (calloc() zeroes it)
          */
         strv = calloc(len + 2, sizeof(char *));
         strv[0] = strdup("non-option ARGV-elements:");
 
         for (gchar **s = options.remainder; *s; s++) {
             strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
             i++;
         }
 
         exit_code = CRM_EX_USAGE;
         msg = g_strjoinv("", strv);
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
         g_free(msg);
 
         for (i = 0; i <= len; i++) {
             free(strv[i]);
         }
         free(strv);
 
         goto done;
     }
 
     if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
         /* Kind of a hack to display XML lists using a real tag instead of
          * <list>. This saves us from having to write custom messages to build
          * the lists around all these things.
          */
         switch (options.rsc_cmd) {
             case cmd_execute_agent:
             case cmd_list_resources:
             case cmd_query_xml:
             case cmd_query_raw_xml:
             case cmd_list_active_ops:
             case cmd_list_all_ops:
             case cmd_colocations:
             case cmd_colocations_deep:
                 pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
                 break;
 
             default:
                 pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
                 break;
         }
     } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
         if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep ||
             options.rsc_cmd == cmd_list_resources) {
             pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
         }
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if (options.cmdline_config) {
         /* A resource configuration was given on the command line. Sanity-check
          * the values and set error if they don't make sense.
          */
         validate_cmdline_config();
         if (error != NULL) {
             exit_code = CRM_EX_USAGE;
             goto done;
         }
 
     } else if (options.cmdline_params != NULL) {
         // @COMPAT @TODO error out here when we can break backward compatibility
         g_hash_table_destroy(options.cmdline_params);
         options.cmdline_params = NULL;
     }
 
     if (options.require_resource && (options.rsc_id == NULL)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Must supply a resource id with -r");
         goto done;
     }
     if (options.require_node && (options.host_uname == NULL)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Must supply a node name with -N");
         goto done;
     }
 
     /*
      * Set up necessary connections
      */
 
     if (options.force) {
         crm_debug("Forcing...");
         cib__set_call_options(options.cib_options, crm_system_name,
                               cib_quorum_override);
     }
 
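     // Looking up a resource by ID requires a populated working set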
     if (options.find_flags && options.rsc_id) {
         options.require_dataset = TRUE;
     }
 
     // Establish a connection to the CIB if needed
     if (options.require_cib) {
         cib_conn = cib_new();
         if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
             exit_code = CRM_EX_DISCONNECT;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Could not create CIB connection");
             goto done;
         }
         rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Could not connect to the CIB: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /* Populate working set from XML file if specified or CIB query otherwise */
     if (options.require_dataset) {
         rc = populate_working_set(&cib_xml_copy);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             goto done;
         }
     }
 
     // If command requires that resource exist if specified, find it
     if (options.find_flags && options.rsc_id) {
         rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
                                           options.find_flags);
         if (rsc == NULL) {
             exit_code = CRM_EX_NOSUCH;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Resource '%s' not found", options.rsc_id);
             goto done;
         }
 
         /* The --ban, --clear, --move, and --restart commands do not work with
          * instances of clone resources.
          */
         if (strchr(options.rsc_id, ':') != NULL && pe_rsc_is_clone(rsc->parent) &&
             (options.rsc_cmd == cmd_ban || options.rsc_cmd == cmd_clear ||
              options.rsc_cmd == cmd_move || options.rsc_cmd == cmd_restart)) {
             exit_code = CRM_EX_INVALID_PARAM;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Cannot operate on clone resource instance '%s'", options.rsc_id);
             goto done;
         }
     }
 
     // If user supplied a node name, check whether it exists
     if ((options.host_uname != NULL) && (data_set != NULL)) {
         node = pe_find_node(data_set->nodes, options.host_uname);
 
         if (node == NULL) {
             exit_code = CRM_EX_NOSUCH;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Node '%s' not found", options.host_uname);
             goto done;
         }
     }
 
     // Establish a connection to the controller if needed
     if (options.require_crmd) {
         rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
         pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                    NULL);
         rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /*
      * Handle requested command
      */
 
     switch (options.rsc_cmd) {
         case cmd_list_resources: {
             GList *all = NULL;
             all = g_list_prepend(all, (gpointer) "*");
             rc = out->message(out, "resource-list", data_set,
                               pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending,
                               TRUE, all, all, FALSE);
             g_list_free(all);
 
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
             break;
         }
 
         case cmd_list_instances:
             rc = out->message(out, "resource-names-list", data_set->resources);
 
             if (rc != pcmk_rc_ok) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_list_standards:
         case cmd_list_providers:
         case cmd_list_alternatives:
             rc = list_providers(out, options.agent_spec);
             break;
 
         case cmd_list_agents:
             rc = list_agents(out, options.agent_spec);
             break;
 
         case cmd_metadata:
             rc = show_metadata(out, options.agent_spec);
             break;
 
         case cmd_restart:
             /* We don't pass data_set because rsc needs to stay valid for the
              * entire lifetime of cli_resource_restart(), but it will reset and
              * update the working set multiple times, so it needs to use its own
              * copy.
              */
             rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
                                       options.timeout_ms, cib_conn,
                                       options.cib_options, options.promoted_role_only,
                                       options.force);
             break;
 
         case cmd_wait:
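             // Wait (up to the given timeout) for the cluster to settle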
             rc = wait_till_stable(out, options.timeout_ms, cib_conn);
             break;
 
         case cmd_execute_agent:
             if (options.cmdline_config) {
                 exit_code = cli_resource_execute_from_params(out, NULL,
                     options.v_class, options.v_provider, options.v_agent,
                     options.operation, options.cmdline_params,
                     options.override_params, options.timeout_ms,
                     args->verbosity, options.force, options.check_level);
             } else {
                 exit_code = cli_resource_execute(rsc, options.rsc_id,
                     options.operation, options.override_params,
                     options.timeout_ms, cib_conn, data_set,
                     args->verbosity, options.force, options.check_level);
             }
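             // These helpers return an exit code directly, so skip the rc
             // conversion below and go straight to cleanup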
             goto done;
 
         case cmd_digests:
             node = pe_find_node(data_set->nodes, options.host_uname);
             if (node == NULL) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = pcmk__resource_digests(out, rsc, node,
-                                            options.override_params, data_set);
+                                            options.override_params);
             }
             break;
 
         case cmd_colocations:
             rc = out->message(out, "stacks-constraints", rsc, data_set, false);
             break;
 
         case cmd_colocations_deep:
             rc = out->message(out, "stacks-constraints", rsc, data_set, true);
             break;
 
         case cmd_cts:
             rc = pcmk_rc_ok;
             g_list_foreach(data_set->resources, (GFunc) cli_resource_print_cts, out);
             cli_resource_print_cts_constraints(data_set);
             break;
 
         case cmd_fail:
             rc = cli_resource_fail(controld_api, options.host_uname,
                                    options.rsc_id, data_set);
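             // On success, run the main loop until the controller replies via
             // the event callback registered earlier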
             if (rc == pcmk_rc_ok) {
                 start_mainloop(controld_api);
             }
             break;
 
         case cmd_list_active_ops:
             rc = cli_resource_print_operations(options.rsc_id,
                                                options.host_uname, TRUE,
                                                data_set);
             break;
 
         case cmd_list_all_ops:
             rc = cli_resource_print_operations(options.rsc_id,
                                                options.host_uname, FALSE,
                                                data_set);
             break;
 
         case cmd_locate: {
             GList *nodes = cli_resource_search(rsc, options.rsc_id, data_set);
             rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
             g_list_free_full(nodes, free);
             break;
         }
 
         case cmd_query_xml:
             rc = cli_resource_print(rsc, data_set, TRUE);
             break;
 
         case cmd_query_raw_xml:
             rc = cli_resource_print(rsc, data_set, FALSE);
             break;
 
         case cmd_why:
             if ((options.host_uname != NULL) && (node == NULL)) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = out->message(out, "resource-reasons-list", cib_conn,
                                   data_set->resources, rsc, node);
             }
             break;
 
         case cmd_clear:
             rc = clear_constraints(out, &cib_xml_copy);
             break;
 
         case cmd_move:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime);
             } else {
                 rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
                                        options.move_lifetime, cib_conn,
                                        options.cib_options, data_set,
                                        options.promoted_role_only,
                                        options.force);
             }
 
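             // EINVAL from either helper indicates bad command-line arguments,
             // so report it as a usage error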
             if (rc == EINVAL) {
                 exit_code = CRM_EX_USAGE;
                 goto done;
             }
 
             break;
 
         case cmd_ban:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime);
             } else if (node == NULL) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
                                       options.move_lifetime, NULL, cib_conn,
                                       options.cib_options,
                                       options.promoted_role_only);
             }
 
             if (rc == EINVAL) {
                 exit_code = CRM_EX_USAGE;
                 goto done;
             }
 
             break;
 
         case cmd_get_property:
             rc = out->message(out, "property-list", rsc, options.prop_name);
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_set_property:
             rc = set_property();
             break;
 
         case cmd_get_param: {
             unsigned int count = 0;
             GHashTable *params = NULL;
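             // Find where the resource is active (count > 1 means it is active
             // on multiple nodes)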
             pe_node_t *current = pe__find_active_on(rsc, &count, NULL);
             bool free_params = true;
 
             if (count > 1) {
                 out->err(out, "%s is active on more than one node,"
                          " returning the default value for %s", rsc->id,
                          pcmk__s(options.prop_name, "unspecified property"));
                 current = NULL;
             }
 
             crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
 
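             // Fetch the requested table: instance attributes, meta-attributes,
             // or (by default) utilization attributes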
             if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
                 params = pe_rsc_params(rsc, current, data_set);
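                 // pe_rsc_params() returns a table cached on the resource, so
                 // it must not be destroyed here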
                 free_params = false;
 
             } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
                 params = pcmk__strkey_table(free, free);
                 get_meta_attributes(params, rsc, current, data_set);
 
             } else {
                 params = pcmk__strkey_table(free, free);
                 pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
                                            NULL, FALSE, data_set);
             }
 
             rc = out->message(out, "attribute-list", rsc, options.prop_name, params);
             if (free_params) {
                 g_hash_table_destroy(params);
             }
             break;
         }
 
         case cmd_set_param:
             if (pcmk__str_empty(options.prop_value)) {
                 exit_code = CRM_EX_USAGE;
                 g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                             "You need to supply a value with the -v option");
                 goto done;
             }
 
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_update_attribute(rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name,
                                                options.prop_value,
                                                options.recursive, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_delete_param:
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_delete_attribute(rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_cleanup:
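             // With no resource specified, clean up failed operations for all
             // resources (on the given node, if any)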
             if (rsc == NULL) {
                 rc = cli_cleanup_all(controld_api, options.host_uname,
                                      options.operation, options.interval_spec,
                                      data_set);
                 if (rc == pcmk_rc_ok) {
                     start_mainloop(controld_api);
                 }
             } else {
                 cleanup(out, rsc);
             }
             break;
 
         case cmd_refresh:
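             // With no resource specified, refresh the state of all resources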
             if (rsc == NULL) {
                 rc = refresh(out);
             } else {
                 refresh_resource(out, rsc);
             }
             break;
 
         case cmd_delete:
             rc = delete();
             break;
 
         default:
             exit_code = CRM_EX_USAGE;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Unimplemented command: %d", (int) options.rsc_cmd);
             goto done;
     }
 
     /* Convert rc into an exit code. */
     if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
         if (rc == pcmk_rc_no_quorum) {
             g_prefix_error(&error, "To ignore quorum, use the force option.\n");
         }
 
         exit_code = pcmk_rc2exitc(rc);
     }
 
     /*
      * Clean up and exit
      */
 
 done:
     /* When we get here, exit_code has been set in one of two ways - either at
      * one of the spots where there's a "goto done" (where it was set directly
      * or by calling pcmk_rc2exitc), or just above, after one of the switch's
      * break statements.
      *
      * Thus, exit_code alone is enough to decide what to do.
      */
     if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
         if (error != NULL) {
             char *msg = crm_strdup_printf("%s\nError performing operation: %s",
                                           error->message, crm_exit_str(exit_code));
             g_clear_error(&error);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
             free(msg);
         } else {
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         "Error performing operation: %s", crm_exit_str(exit_code));
         }
     }
 
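     /* Match each string's allocator: g_free() for strings from GLib option
      * processing, free() for strings allocated with strdup()
      */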
     g_free(options.host_uname);
     g_free(options.interval_spec);
     g_free(options.move_lifetime);
     g_free(options.operation);
     g_free(options.prop_id);
     free(options.prop_name);
     g_free(options.prop_set);
     g_free(options.prop_value);
     g_free(options.rsc_id);
     g_free(options.rsc_type);
     free(options.agent_spec);
     free(options.v_agent);
     free(options.v_class);
     free(options.v_provider);
     g_free(options.xml_file);
     g_strfreev(options.remainder);
 
     if (options.override_params != NULL) {
         g_hash_table_destroy(options.override_params);
     }
 
     /* options.cmdline_params does not need to be destroyed here.  See the
      * comments in cli_resource_execute_from_params.
      */
 
     g_strfreev(processed_args);
     g_option_context_free(context);
 
     return bye(exit_code);
 }