diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 23e6a919f3..939f8c8af8 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1520 +1,1521 @@
 #!@PYTHON@
 """ Regression tests for Pacemaker's scheduler
 """
 
 # Pacemaker targets compatibility with Python 2.7 and 3.2+
 from __future__ import print_function, unicode_literals, absolute_import, division
 
 __copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import io
 import os
 import re
 import sys
 import stat
 import shlex
 import shutil
 import argparse
 import subprocess
 import platform
 
 DESC = """Regression tests for Pacemaker's scheduler"""
 
 # Each entry in TESTS is a group of tests, where each test consists of a
 # test base name, test description, and additional test arguments.
 # Test groups will be separated by newlines in output.
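 #
 # For example, a date-based entry with additional arguments (a hypothetical
 # name for illustration; the additional arguments are passed through when
 # the test is run) looks like:
 #
 #     [ "example-date", "Example date test", [ "-t", "2005-020" ] ]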
 TESTS = [
     [
         [ "simple1", "Offline" ],
         [ "simple2", "Start" ],
         [ "simple3", "Start 2" ],
         [ "simple4", "Start Failed" ],
         [ "simple6", "Stop Start" ],
         [ "simple7", "Shutdown" ],
         #[ "simple8", "Stonith" ],
         #[ "simple9", "Lower version" ],
         #[ "simple10", "Higher version" ],
         [ "simple11", "Priority (ne)" ],
         [ "simple12", "Priority (eq)" ],
         [ "simple8", "Stickiness" ],
     ],
     [
         [ "group1", "Group" ],
         [ "group2", "Group + Native" ],
         [ "group3", "Group + Group" ],
         [ "group4", "Group + Native (nothing)" ],
         [ "group5", "Group + Native (move)" ],
         [ "group6", "Group + Group (move)" ],
         [ "group7", "Group colocation" ],
         [ "group13", "Group colocation (cant run)" ],
         [ "group8", "Group anti-colocation" ],
         [ "group9", "Group recovery" ],
         [ "group10", "Group partial recovery" ],
         [ "group11", "Group target_role" ],
         [ "group14", "Group stop (graph terminated)" ],
         [ "group15", "Negative group colocation" ],
         [ "bug-1573", "Partial stop of a group with two children" ],
         [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
         [ "bug-lf-2613", "Move group on failure" ],
         [ "bug-lf-2619", "Move group on clone failure" ],
         [ "group-fail", "Ensure stop order is preserved for partially active groups" ],
         [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
         [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
         [ "group-dependents", "Account for the location preferences of things colocated with a group" ],
         [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
     ],
     [
         [ "rsc_dep1", "Must not" ],
         [ "rsc_dep3", "Must" ],
         [ "rsc_dep5", "Must not 3" ],
         [ "rsc_dep7", "Must 3" ],
         [ "rsc_dep10", "Must (but cant)" ],
         [ "rsc_dep2", "Must (running)" ],
         [ "rsc_dep8", "Must (running : alt)" ],
         [ "rsc_dep4", "Must (running + move)" ],
         [ "asymmetric", "Asymmetric - require explicit location constraints" ],
     ],
     [
         [ "orphan-0", "Orphan ignore" ],
         [ "orphan-1", "Orphan stop" ],
         [ "orphan-2", "Orphan stop, remove failcount" ],
     ],
     [
         [ "params-0", "Params: No change" ],
         [ "params-1", "Params: Changed" ],
         [ "params-2", "Params: Resource definition" ],
         [ "params-3", "Params: Restart instead of reload if start pending" ],
         [ "params-4", "Params: Reload" ],
         [ "params-5", "Params: Restart based on probe digest" ],
         [ "novell-251689", "Resource definition change + target_role=stopped" ],
         [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
         [ "params-6", "Params: Detect reload in previously migrated resource" ],
         [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
         [ "not-reschedule-unneeded-monitor",
                      "Do not reschedule unneeded monitors while resource definitions have changed" ],
         [ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
     ],
     [
         [ "target-0", "Target Role : baseline" ],
         [ "target-1", "Target Role : master" ],
         [ "target-2", "Target Role : invalid" ],
     ],
     [
         [ "base-score", "Set a node's default score for all nodes" ],
     ],
     [
         [ "date-1", "Dates", [ "-t",  "2005-020" ] ],
         [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
         [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
         [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
         [ "probe-0", "Probe (anon clone)" ],
         [ "probe-1", "Pending Probe" ],
         [ "probe-2", "Correctly re-probe cloned groups" ],
         [ "probe-3", "Probe (pending node)" ],
         [ "probe-4", "Probe (pending node + stopped resource)" ],
         [ "standby", "Standby" ],
         [ "comments", "Comments" ],
     ],
     [
         [ "one-or-more-0", "Everything starts" ],
         [ "one-or-more-1", "Nothing starts because of A" ],
         [ "one-or-more-2", "D can start because of C" ],
         [ "one-or-more-3", "D cannot start because of B and C" ],
         [ "one-or-more-4", "D cannot start because of target-role" ],
         [ "one-or-more-5", "Start A and F even though C and D are stopped" ],
         [ "one-or-more-6", "Leave A running even though B is stopped" ],
         [ "one-or-more-7", "Leave A running even though C is stopped" ],
         [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
         [ "clone-require-all-1", "clone B starts node 3 and 4" ],
         [ "clone-require-all-2", "clone B remains stopped everywhere" ],
         [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
         [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
         [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
         [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
         [ "clone-require-all-7",
           "clone A and B both start at the same time. all instances of A start before B" ],
         [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
         [ "clone-require-all-no-interleave-2",
           "C starts on nodes 1, 2, and 4 with only one active instance of B" ],
         [ "clone-require-all-no-interleave-3",
           "C remains active when instance of B is stopped on one node and started on another" ],
         [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
     ],
     [
         [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
         [ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
         [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
+        [ "value-source", "Use location constraints with node attribute expressions using value-source" ],
         [ "rule-dbl-as-auto-number-match",
           "Floating-point rule values default to number comparison: match" ],
         [ "rule-dbl-as-auto-number-no-match",
           "Floating-point rule values default to number comparison: no "
                   "match" ],
         [ "rule-dbl-as-integer-match",
           "Floating-point rule values set to integer comparison: match" ],
         [ "rule-dbl-as-integer-no-match",
           "Floating-point rule values set to integer comparison: no match" ],
         [ "rule-dbl-as-number-match",
           "Floating-point rule values set to number comparison: match" ],
         [ "rule-dbl-as-number-no-match",
           "Floating-point rule values set to number comparison: no match" ],
         [ "rule-dbl-parse-fail-default-str-match",
           "Floating-point rule values fail to parse, default to string "
 	          "comparison: match" ],
         [ "rule-dbl-parse-fail-default-str-no-match",
           "Floating-point rule values fail to parse, default to string "
 	          "comparison: no match" ],
         [ "rule-int-as-auto-integer-match",
           "Integer rule values default to integer comparison: match" ],
         [ "rule-int-as-auto-integer-no-match",
           "Integer rule values default to integer comparison: no match" ],
         [ "rule-int-as-integer-match",
           "Integer rule values set to integer comparison: match" ],
         [ "rule-int-as-integer-no-match",
           "Integer rule values set to integer comparison: no match" ],
         [ "rule-int-as-number-match",
           "Integer rule values set to number comparison: match" ],
         [ "rule-int-as-number-no-match",
           "Integer rule values set to number comparison: no match" ],
         [ "rule-int-parse-fail-default-str-match",
           "Integer rule values fail to parse, default to string "
 	          "comparison: match" ],
         [ "rule-int-parse-fail-default-str-no-match",
           "Integer rule values fail to parse, default to string "
 	          "comparison: no match" ],
     ],
     [
         [ "order1", "Order start 1" ],
         [ "order2", "Order start 2" ],
         [ "order3", "Order stop" ],
         [ "order4", "Order (multiple)" ],
         [ "order5", "Order (move)" ],
         [ "order6", "Order (move w/ restart)" ],
         [ "order7", "Order (mandatory)" ],
         [ "order-optional", "Order (score=0)" ],
         [ "order-required", "Order (score=INFINITY)" ],
         [ "bug-lf-2171", "Prevent group start when clone is stopped" ],
         [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
         [ "order-sets", "Ordering for resource sets" ],
         [ "order-serialize", "Serialize resources without inhibiting migration" ],
         [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
         [ "clone-order-primitive", "Order clone start after a primitive" ],
         [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
         [ "order-optional-keyword", "Order (optional keyword)" ],
         [ "order-mandatory", "Order (mandatory keyword)" ],
         [ "bug-lf-2493",
           "Don't imply colocation requirements when applying ordering constraints with clones" ],
         [ "ordered-set-basic-startup", "Constraint set with default order settings" ],
         [ "ordered-set-natural", "Allow natural set ordering" ],
         [ "order-wrong-kind", "Order (error)" ],
     ],
     [
         [ "coloc-loop", "Colocation - loop" ],
         [ "coloc-many-one", "Colocation - many-to-one" ],
         [ "coloc-list", "Colocation - many-to-one with list" ],
         [ "coloc-group", "Colocation - groups" ],
         [ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
         [ "coloc-attr", "Colocation based on node attributes" ],
         [ "coloc-negative-group", "Negative colocation with a group" ],
         [ "coloc-intra-set", "Intra-set colocation" ],
         [ "bug-lf-2435", "Colocation sets with a negative score" ],
         [ "coloc-clone-stays-active",
           "Ensure clones don't get stopped/demoted because a dependent must stop" ],
         [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
         [ "colo_master_w_native",
           "cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
         [ "colo_slave_w_native",
           "cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
         [ "anti-colocation-order",
           "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
         [ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
         [ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
         [ "enforce-colo1", "Always enforce B with A INFINITY" ],
         [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
         [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
         [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
     ],
     [
         [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
         [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
         [ "rsc-sets-clone", "Resource Sets - Clone" ],
         [ "rsc-sets-master", "Resource Sets - Master" ],
         [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
     ],
     [
         [ "attrs1", "string: eq (and)" ],
         [ "attrs2", "string: lt / gt (and)" ],
         [ "attrs3", "string: ne (or)" ],
         [ "attrs4", "string: exists" ],
         [ "attrs5", "string: not_exists" ],
         [ "attrs6", "is_dc: true" ],
         [ "attrs7", "is_dc: false" ],
         [ "attrs8", "score_attribute" ],
         [ "per-node-attrs", "Per node resource parameters" ],
     ],
     [
         [ "mon-rsc-1", "Schedule Monitor - start" ],
         [ "mon-rsc-2", "Schedule Monitor - move" ],
         [ "mon-rsc-3", "Schedule Monitor - pending start" ],
         [ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
     ],
     [
         [ "rec-rsc-0", "Resource Recover - no start" ],
         [ "rec-rsc-1", "Resource Recover - start" ],
         [ "rec-rsc-2", "Resource Recover - monitor" ],
         [ "rec-rsc-3", "Resource Recover - stop - ignore" ],
         [ "rec-rsc-4", "Resource Recover - stop - block" ],
         [ "rec-rsc-5", "Resource Recover - stop - fence" ],
         [ "rec-rsc-6", "Resource Recover - multiple - restart" ],
         [ "rec-rsc-7", "Resource Recover - multiple - stop" ],
         [ "rec-rsc-8", "Resource Recover - multiple - block" ],
         [ "rec-rsc-9", "Resource Recover - group/group" ],
         [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
         [ "stop-failure-no-quorum", "Stop failure without quorum" ],
         [ "stop-failure-no-fencing", "Stop failure without fencing available" ],
         [ "stop-failure-with-fencing", "Stop failure with fencing available" ],
         [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
         [ "multiple-monitor-one-failed",
           "Consider resource failed if any of the configured monitor operations failed" ],
     ],
     [
         [ "quorum-1", "No quorum - ignore" ],
         [ "quorum-2", "No quorum - freeze" ],
         [ "quorum-3", "No quorum - stop" ],
         [ "quorum-4", "No quorum - start anyway" ],
         [ "quorum-5", "No quorum - start anyway (group)" ],
         [ "quorum-6", "No quorum - start anyway (clone)" ],
         [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
         [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
         [ "suicide-not-needed-initial-quorum",
           "no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
         [ "suicide-not-needed-never-quorate",
           "no-quorum-policy=suicide: suicide not necessary if never quorate" ],
         [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
     ],
     [
         [ "rec-node-1", "Node Recover - Startup   - no fence" ],
         [ "rec-node-2", "Node Recover - Startup   - fence" ],
         [ "rec-node-3", "Node Recover - HA down   - no fence" ],
         [ "rec-node-4", "Node Recover - HA down   - fence" ],
         [ "rec-node-5", "Node Recover - CRM down  - no fence" ],
         [ "rec-node-6", "Node Recover - CRM down  - fence" ],
         [ "rec-node-7", "Node Recover - no quorum - ignore" ],
         [ "rec-node-8", "Node Recover - no quorum - freeze" ],
         [ "rec-node-9", "Node Recover - no quorum - stop" ],
         [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
         [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
         [ "rec-node-12", "Node Recover - nothing active - fence" ],
         [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
         [ "rec-node-15", "Node Recover - unknown lrm section" ],
         [ "rec-node-14", "Serialize all stonith's" ],
     ],
     [
         [ "multi1", "Multiple Active (stop/start)" ],
     ],
     [
         [ "migrate-begin", "Normal migration" ],
         [ "migrate-success", "Completed migration" ],
         [ "migrate-partial-1", "Completed migration, missing stop on source" ],
         [ "migrate-partial-2", "Successful migrate_to only" ],
         [ "migrate-partial-3", "Successful migrate_to only, target down" ],
         [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
         [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
         [ "migrate-fail-2", "Failed migrate_from" ],
         [ "migrate-fail-3", "Failed migrate_from + stop on source" ],
         [ "migrate-fail-4",
           "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
         [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
         [ "migrate-fail-6", "Failed migrate_to" ],
         [ "migrate-fail-7", "Failed migrate_to + stop on source" ],
         [ "migrate-fail-8",
           "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
         [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
         [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
         [ "migrate-stop", "Migration in a stopping stack" ],
         [ "migrate-start", "Migration in a starting stack" ],
         [ "migrate-stop_start", "Migration in a restarting stack" ],
         [ "migrate-stop-complex", "Migration in a complex stopping stack" ],
         [ "migrate-start-complex", "Migration in a complex starting stack" ],
         [ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
         [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
         [ "migrate-1", "Migrate (migrate)" ],
         [ "migrate-2", "Migrate (stable)" ],
         [ "migrate-3", "Migrate (failed migrate_to)" ],
         [ "migrate-4", "Migrate (failed migrate_from)" ],
         [ "novell-252693", "Migration in a stopping stack" ],
         [ "novell-252693-2", "Migration in a starting stack" ],
         [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
         [ "bug-1820", "Migration in a group" ],
         [ "bug-1820-1", "Non-migration in a group" ],
         [ "migrate-5", "Primitive migration with a clone" ],
         [ "migrate-fencing", "Migration after Fencing" ],
         [ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
         [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
         [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
         [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
         [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
         [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
         [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
         [ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
         [ "7-migrate-group-one-unmigratable",
           "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
         [ "8-am-then-bm-a-migrating-b-stopping",
           "Advanced migrate logic, A then B, A migrating, B stopping" ],
         [ "9-am-then-bm-b-migrating-a-stopping",
           "Advanced migrate logic, A then B, B migrate, A stopping" ],
         [ "10-a-then-bm-b-move-a-clone",
           "Advanced migrate logic, A clone then B, migrate B while stopping A" ],
         [ "11-a-then-bm-b-move-a-clone-starting",
           "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
         [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
         [ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
 
         # @TODO: If pacemaker implements versioned attributes, uncomment this test
         #[ "migrate-versioned", "Disable migration for versioned resources" ],
 
         [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
     ],
     [
         [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
         [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
         [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
         [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
         [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
         [ "inc0", "Incarnation start" ],
         [ "inc1", "Incarnation start order" ],
         [ "inc2", "Incarnation silent restart, stop, move" ],
         [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
         [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
         [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
         [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
         [ "inc7", "Clone colocation" ],
         [ "inc8", "Clone anti-colocation" ],
         [ "inc9", "Non-unique clone" ],
         [ "inc10", "Non-unique clone (stop)" ],
         [ "inc11", "Primitive colocation with clones" ],
         [ "inc12", "Clone shutdown" ],
         [ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
         [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
         [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
         [ "clone-max-zero", "Orphan processing with clone-max=0" ],
         [ "clone-anon-dup",
           "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
         [ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
         [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
         [ "bug-lf-2153", "Clone ordering constraints" ],
         [ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
         [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
         [ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
         [ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
         [ "clone-order-instance", "Ordering with specific clone instances" ],
         [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
         [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
         [ "bug-lf-2544", "Balanced clone placement" ],
         [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
         [ "bug-lf-2574", "Avoid clone shuffle" ],
         [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
         [ "bug-cl-5168", "Don't shuffle clones" ],
         [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
         [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
         [ "clone-interleave-1",
           "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
         [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
         [ "clone-interleave-3",
           "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
         [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
         [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
         [ "clone-requires-quorum",
           "Clone with requires=quorum with presumed-inactive instance on failed node" ],
     ],
     [
         [ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
         [ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
         [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
         [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
         [ "clone_min_interleave_start_one",
           "order first clone then clone... first clone_min=2 and then has interleave=true" ],
         [ "clone_min_interleave_start_two",
           "order first clone then clone... first clone_min=2 and then has interleave=true" ],
         [ "clone_min_interleave_stop_one",
           "order first clone then clone... first clone_min=2 and then has interleave=true" ],
         [ "clone_min_interleave_stop_two",
           "order first clone then clone... first clone_min=2 and then has interleave=true" ],
         [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
         [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
         [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
         [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
         [ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
     ],
     [
         [ "unfence-startup", "Clean unfencing" ],
         [ "unfence-definition", "Unfencing when the agent changes" ],
         [ "unfence-parameters", "Unfencing when the agent parameters changes" ],
         [ "unfence-device", "Unfencing when a cluster has only fence devices" ],
     ],
     [
         [ "master-0", "Stopped -> Slave" ],
         [ "master-1", "Stopped -> Promote" ],
         [ "master-2", "Stopped -> Promote : notify" ],
         [ "master-3", "Stopped -> Promote : master location" ],
         [ "master-4", "Started -> Promote : master location" ],
         [ "master-5", "Promoted -> Promoted" ],
         [ "master-6", "Promoted -> Promoted (2)" ],
         [ "master-7", "Promoted -> Fenced" ],
         [ "master-8", "Promoted -> Fenced -> Moved" ],
         [ "master-9", "Stopped + Promotable + No quorum" ],
         [ "master-10", "Stopped -> Promotable : notify with monitor" ],
         [ "master-11", "Stopped -> Promote : colocation" ],
         [ "novell-239082", "Demote/Promote ordering" ],
         [ "novell-239087", "Stable master placement" ],
         [ "master-12", "Promotion based solely on rsc_location constraints" ],
         [ "master-13", "Include preferences of colocated resources when placing master" ],
         [ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
         [ "master-ordering", "Prevent resources from starting that need a master" ],
         [ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
         [ "master-group", "Promotion of cloned groups" ],
         [ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
         [ "master-failed-demote", "Don't retry failed demote actions" ],
         [ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
         [ "master-depend",
           "Ensure resources that depend on the master don't get allocated until the master does" ],
         [ "master-reattach", "Re-attach to a running master" ],
         [ "master-allow-start", "Don't include master score if it would prevent allocation" ],
         [ "master-colocation",
           "Allow master instances placemaker to be influenced by colocation constraints" ],
         [ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
         [ "master-role", "Prevent target-role from promoting more than master-max instances" ],
         [ "bug-lf-2358", "Master-Master anti-colocation" ],
         [ "master-promotion-constraint", "Mandatory master colocation constraints" ],
         [ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
         [ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
         [ "master-demote-2", "Demote does not clear past failure" ],
         [ "master-move", "Move master based on failure of colocated group" ],
         [ "master-probed-score", "Observe the promotion score of probed resources" ],
         [ "colocation_constraint_stops_master",
           "cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
         [ "colocation_constraint_stops_slave",
           "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
         [ "order_constraint_stops_master",
           "cl#5054 - Ensure master is demoted when stopped by order constraint" ],
         [ "order_constraint_stops_slave",
           "cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
         [ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
         [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
         [ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
         [ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
         [ "master-dependent-ban",
           "Don't stop instances from being active because a dependent is banned from that host" ],
         [ "master-stop", "Stop instances due to location constraint with role=Started" ],
         [ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
         [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
         [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
         [ "master-asymmetrical-order",
           "Fix the behaviors of multi-state resources with asymmetrical ordering" ],
         [ "master-notify", "Master promotion with notifies" ],
         [ "master-score-startup", "Use permanent master scores without LRM history" ],
         [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
         [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
         [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
         [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
         [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
         [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
         [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
         [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ],
     ],
     [
         [ "history-1", "Correctly parse stateful-1 resource state" ],
     ],
     [
         [ "managed-0", "Managed (reference)" ],
         [ "managed-1", "Not managed - down" ],
         [ "managed-2", "Not managed - up" ],
         [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
         [ "bug-5028-detach", "Ensure detach still works" ],
         [ "bug-5028-bottom",
           "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
         [ "unmanaged-stop-1",
           "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
         [ "unmanaged-stop-2",
           "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
         [ "unmanaged-stop-3",
           "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
         [ "unmanaged-stop-4",
           "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
         [ "unmanaged-block-restart",
           "Block restart of resources if any dependent resource in a group is unmanaged" ],
     ],
     [
         [ "interleave-0", "Interleave (reference)" ],
         [ "interleave-1", "coloc - not interleaved" ],
         [ "interleave-2", "coloc - interleaved" ],
         [ "interleave-3", "coloc - interleaved (2)" ],
         [ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
         [ "interleave-stop", "Interleaved clone during stop" ],
         [ "interleave-restart", "Interleaved clone during dependency restart" ],
     ],
     [
         [ "notify-0", "Notify reference" ],
         [ "notify-1", "Notify simple" ],
         [ "notify-2", "Notify simple, confirm" ],
         [ "notify-3", "Notify move, confirm" ],
         [ "novell-239079", "Notification priority" ],
         #[ "notify-2", "Notify - 764" ],
         [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
         [ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
         [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
     ],
     [
         [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
         [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
         [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
         [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
         [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
         [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
         [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
         [ "829", "OSDL #829" ],
         [ "994",
           "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
         [ "994-2", "OSDL #994 - with a dependent resource" ],
         [ "1360", "OSDL #1360 - Clone stickiness" ],
         [ "1484", "OSDL #1484 - on_fail=stop" ],
         [ "1494", "OSDL #1494 - Clone stability" ],
         [ "unrunnable-1", "Unrunnable" ],
         [ "unrunnable-2", "Unrunnable 2" ],
         [ "stonith-0", "Stonith loop - 1" ],
         [ "stonith-1", "Stonith loop - 2" ],
         [ "stonith-2", "Stonith loop - 3" ],
         [ "stonith-3", "Stonith startup" ],
         [ "stonith-4", "Stonith node state" ],
         [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
         [ "bug-1572-1", "Recovery of groups depending on master/slave" ],
         [ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
         [ "bug-1685", "Depends-on-master ordering" ],
         [ "bug-1822", "Don't promote partially active groups" ],
         [ "bug-pm-11", "New resource added to a m/s group" ],
         [ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
         [ "bug-n-387749", "Don't shuffle clone instances" ],
         [ "bug-n-385265",
           "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
         [ "bug-n-385265-2",
           "Ensure groups are migrated instead of remaining partially active on the current node" ],
         [ "bug-lf-1920", "Correctly handle probes that find active resources" ],
         [ "bnc-515172", "Location constraint with multiple expressions" ],
         [ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
         [ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
         [ "bug-lf-2551", "STONITH ordering for stop" ],
         [ "bug-lf-2606", "Stonith implies demote" ],
         [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
         [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
         [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
         [ "bug-5014-A-stop-B-started",
           "Verify when A stops B does not stop if it has already started using symmetric=false" ],
         [ "bug-5014-A-stopped-B-stopped",
           "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
         [ "bug-5014-CthenAthenB-C-stopped",
           "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
         [ "bug-5014-CLONE-A-start-B-start",
           "Verify when A starts B starts using clone resources with symmetric=false" ],
         [ "bug-5014-CLONE-A-stop-B-started",
           "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
         [ "bug-5014-GROUP-A-start-B-start",
           "Verify when A starts B starts when using group resources with symmetric=false" ],
         [ "bug-5014-GROUP-A-stopped-B-started",
           "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
         [ "bug-5014-GROUP-A-stopped-B-stopped",
           "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
         [ "bug-5014-ordered-set-symmetrical-false",
           "Verify ordered sets work with symmetrical=false" ],
         [ "bug-5014-ordered-set-symmetrical-true",
           "Verify ordered sets work with symmetrical=true" ],
         [ "bug-5007-masterslave_colocation",
           "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
         [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
         [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
         [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
         [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
         [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
         [ "failcount", "Ensure failcounts are correctly expired" ],
         [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
         [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
         [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
         [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
         [ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
         [ "bug-5059", "No need to restart p_stateful1:*" ],
         [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
         [ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
         [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
         [ "expire-non-blocked-failure",
           "Ignore failure-timeout only if the failed operation has on-fail=block" ],
         [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
         [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
         [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
         [ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
     
         [ "ignore_stonith_rsc_order1",
           "cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
         [ "ignore_stonith_rsc_order2",
           "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
         [ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
         [ "ignore_stonith_rsc_order4",
           "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
         [ "honor_stonith_rsc_order1",
           "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
         [ "honor_stonith_rsc_order2",
           "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
         [ "honor_stonith_rsc_order3",
           "cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
         [ "honor_stonith_rsc_order4",
           "cl#5056- Honor order constraint, between two native stonith rscs" ],
         [ "multiply-active-stonith", "Multiply active stonith" ],
         [ "probe-timeout", "cl#5099 - Default probe timeout" ],
         [ "order-first-probes",
           "cl#5301 - respect order constraints when relevant resources are being probed" ],
         [ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
         [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
     ],
     [
         [ "systemhealth1", "System Health ()               #1" ],
         [ "systemhealth2", "System Health ()               #2" ],
         [ "systemhealth3", "System Health ()               #3" ],
         [ "systemhealthn1", "System Health (None)           #1" ],
         [ "systemhealthn2", "System Health (None)           #2" ],
         [ "systemhealthn3", "System Health (None)           #3" ],
         [ "systemhealthm1", "System Health (Migrate On Red) #1" ],
         [ "systemhealthm2", "System Health (Migrate On Red) #2" ],
         [ "systemhealthm3", "System Health (Migrate On Red) #3" ],
         [ "systemhealtho1", "System Health (Only Green)     #1" ],
         [ "systemhealtho2", "System Health (Only Green)     #2" ],
         [ "systemhealtho3", "System Health (Only Green)     #3" ],
         [ "systemhealthp1", "System Health (Progessive)     #1" ],
         [ "systemhealthp2", "System Health (Progessive)     #2" ],
         [ "systemhealthp3", "System Health (Progessive)     #3" ],
     ],
     [
         [ "utilization", "Placement Strategy - utilization" ],
         [ "minimal", "Placement Strategy - minimal" ],
         [ "balanced", "Placement Strategy - balanced" ],
     ],
     [
         [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
         [ "placement-priority", "Optimized Placement Strategy - priority" ],
         [ "placement-location", "Optimized Placement Strategy - location" ],
         [ "placement-capacity", "Optimized Placement Strategy - capacity" ],
     ],
     [
         [ "utilization-order1", "Utilization Order - Simple" ],
         [ "utilization-order2", "Utilization Order - Complex" ],
         [ "utilization-order3", "Utilization Order - Migrate" ],
         [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
         [ "utilization-shuffle",
           "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
         [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
         [ "load-stopped-loop-2",
           "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
     ],
     [
         [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
         [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
         [ "colocated-utilization-group", "Colocated Utilization - Group" ],
         [ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
         [ "utilization-check-allowed-nodes",
           "Only check the capacities of the nodes that can run the resource" ],
     ],
     [
         [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
         [ "node-maintenance-1", "cl#5128 - Node maintenance" ],
         [ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
         [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
         [ "rsc-maintenance", "Per-resource maintenance" ],
     ],
     [
         [ "not-installed-agent", "The resource agent is missing" ],
         [ "not-installed-tools", "Something the resource agent needs is missing" ],
     ],
     [
         [ "stopped-monitor-00", "Stopped Monitor - initial start" ],
         [ "stopped-monitor-01", "Stopped Monitor - failed started" ],
         [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
         [ "stopped-monitor-03", "Stopped Monitor - stop started" ],
         [ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
         [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
         [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
         [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
         [ "stopped-monitor-08", "Stopped Monitor - migrate" ],
         [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
         [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
         [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
         [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
         [ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
         [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
         [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
         [ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
         [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
         [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
         [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
         [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
         [ "stopped-monitor-30", "Stopped Monitor - new node started" ],
         [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
     ],
     [
         # This is a combo test to check:
         # - probe timeout defaults to the minimum-interval monitor's
         # - duplicate recurring operations are ignored
         # - if timeout spec is bad, the default timeout is used
         # - failure is blocked with on-fail=block even if ISO8601 interval is specified
         # - started/stopped role monitors are started/stopped on right nodes
         [ "intervals", "Recurring monitor interval handling" ],
     ],
     [
         [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
         [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
         [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
         [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
         [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
         [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
         [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
         [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
         [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
         [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
         [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
         [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
         [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
         [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
         [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
         [ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
         [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
         [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
         [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
         [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
         [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
         [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
         [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
         [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
     ],
     [
         [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
         [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
         [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
         [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
         [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
         [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
         [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
         [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
         [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
         [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
         [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
         [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
         [ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
         [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
         [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
         [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
         [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
         [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
         [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
         [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
         [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
         [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
         [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
         [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
     ],
     [
         [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
         [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
         [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
         [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
         [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
         [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
         [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
         [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
         [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
         [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
         [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
         [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
         [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
         [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
         [ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
         [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
         [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
         [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
         [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
         [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
         [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
         [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
         [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
         [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
     ],
     [
         [ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
         [ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
         [ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
         [ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
         [ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
         [ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
         [ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
         [ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
         [ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
         [ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
         [ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
         [ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
         [ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
         [ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
         [ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
         [ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
         [ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
         [ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
         [ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
         [ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
         [ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
         [ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
         [ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
         [ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
     ],
     [
         [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
         [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
         [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
         [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
         [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
         [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
         [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
         [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
         [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
         [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
         [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
         [ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
         [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
         [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
         [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
         [ "site-specific-params", "Site-specific instance attributes based on rules" ],
     ],
     [
         [ "template-1", "Template - 1" ],
         [ "template-2", "Template - 2" ],
         [ "template-3", "Template - 3 (merge operations)" ],
         [ "template-coloc-1", "Template - Colocation 1" ],
         [ "template-coloc-2", "Template - Colocation 2" ],
         [ "template-coloc-3", "Template - Colocation 3" ],
         [ "template-order-1", "Template - Order 1" ],
         [ "template-order-2", "Template - Order 2" ],
         [ "template-order-3", "Template - Order 3" ],
         [ "template-ticket", "Template - Ticket" ],
         [ "template-rsc-sets-1", "Template - Resource Sets 1" ],
         [ "template-rsc-sets-2", "Template - Resource Sets 2" ],
         [ "template-rsc-sets-3", "Template - Resource Sets 3" ],
         [ "template-rsc-sets-4", "Template - Resource Sets 4" ],
         [ "template-clone-primitive", "Cloned primitive from template" ],
         [ "template-clone-group", "Cloned group from template" ],
         [ "location-sets-templates", "Resource sets and templates - Location" ],
         [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
         [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
         [ "tags-location", "Tags - Location" ],
         [ "tags-ticket", "Tags - Ticket" ],
     ],
     [
         [ "container-1", "Container - initial" ],
         [ "container-2", "Container - monitor failed" ],
         [ "container-3", "Container - stop failed" ],
         [ "container-4", "Container - reached migration-threshold" ],
         [ "container-group-1", "Container in group - initial" ],
         [ "container-group-2", "Container in group - monitor failed" ],
         [ "container-group-3", "Container in group - stop failed" ],
         [ "container-group-4", "Container in group - reached migration-threshold" ],
         [ "container-is-remote-node", "Place resource within container when container is remote-node" ],
         [ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
         [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
         [ "bundle-order-startup", "Bundle startup ordering" ],
         [ "bundle-order-partial-start",
           "Bundle startup ordering when some dependencies are already running" ],
         [ "bundle-order-partial-start-2",
           "Bundle startup ordering when some dependencies and the container are already running" ],
         [ "bundle-order-stop", "Bundle stop ordering" ],
         [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
         [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
         [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
         [ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
         [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
         [ "bundle-nested-colocation", "Colocation of nested connection resources" ],
         [ "bundle-order-fencing",
           "Order pseudo bundle fencing after parent node fencing if both are happening" ],
         [ "bundle-probe-order-1", "order 1" ],
         [ "bundle-probe-order-2", "order 2" ],
         [ "bundle-probe-order-3", "order 3" ],
         [ "bundle-probe-remotes", "Ensure remotes get probed too" ],
         [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
         [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
     ],
     [
         [ "whitebox-fail1", "Fail whitebox container rsc" ],
         [ "whitebox-fail2", "Fail cluster connection to guest node" ],
         [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
         [ "whitebox-start", "Start whitebox container with resources assigned to it" ],
         [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
         [ "whitebox-move", "Move whitebox container with resources assigned to it" ],
         [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
         [ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
         [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
         [ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
         [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
         [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
         [ "whitebox-migrate1", "Migrate both container and connection resource" ],
         [ "whitebox-imply-stop-on-fence",
           "imply stop action on container node rsc when host node is fenced" ],
         [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
         [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
         [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
         [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
     ],
     [
         [ "remote-startup-probes", "Baremetal remote-node startup probes" ],
         [ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
         [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
         [ "remote-fence-unclean2",
           "Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
         [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
         [ "remote-move", "Move remote-node connection resource" ],
         [ "remote-disable", "Disable a baremetal remote-node" ],
         [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
         [ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
         [ "remote-orphaned2",
           "verify we can handle orphaned remote connections with active resources on the remote" ],
         [ "remote-recover", "Recover connection resource after cluster-node fails" ],
         [ "remote-stale-node-entry",
           "Make sure we properly handle leftover remote-node entries in the node section" ],
         [ "remote-partial-migrate",
           "Make sure partial migrations are handled before ops on the remote node" ],
         [ "remote-partial-migrate2",
           "Make sure partial migration target is prefered for remote connection" ],
         [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
         [ "remote-start-fail",
           "Make sure a start failure does not result in fencing if no active resources are on remote" ],
         [ "remote-unclean2",
           "Make monitor failure always results in fencing, even if no rsc are active on remote" ],
         [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
         [ "remote-recovery", "Recover remote connections before attempting demotion" ],
         [ "remote-recover-connection", "Optimistically recovery of only the connection" ],
         [ "remote-recover-all", "Fencing when the connection has no home" ],
         [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
         [ "remote-recover-unknown",
           "Fencing when the connection has no home and the remote has no operation history" ],
         [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
         [ "remote-connection-unrecoverable",
           "Remote connection host must be fenced, with connection unrecoverable" ],
     ],
     [
         [ "resource-discovery", "Exercises resource-discovery location constraint option" ],
         [ "rsc-discovery-per-node", "Disable resource discovery per node" ],
         [ "shutdown-lock", "Ensure shutdown lock works properly" ],
         [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
     ],
     [
         [ "op-defaults", "Test op_defaults conditional expressions" ],
         [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
         [ "op-defaults-3", "Test op_defaults precedence" ],
         [ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
         [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
     ],
     [   [ "stop-all-resources", "Test stop-all-resources=true "],
     ],
     [   [ "ocf_degraded-remap-ocf_ok", "Test DEGRADED remapped to OK" ],
         [ "ocf_degraded_master-remap-ocf_ok", "Test DEGRADED_MASTER remapped to OK"],
     ],
     
     # @TODO: If pacemaker implements versioned attributes, uncomment these tests
     #[
     #    [ "versioned-resources", "Start resources with #ra-version rules" ],
     #    [ "restart-versioned", "Restart resources on #ra-version change" ],
     #    [ "reload-versioned", "Reload resources on #ra-version change" ],
     #],
     #[
     #    [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
     #    [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
     #    [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
     #    [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
     #],
 ]
 
 TESTS_64BIT = [
     [
         [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
     ],
 ]
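 
 # In addition to a base name and description, a test entry may carry a third
 # element: a list of extra arguments passed when the test runs (consumed by
 # run_all() below). A hypothetical entry using it would look like:
 #   [ "some-test", "Some description", [ "--some-option" ] ]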
 
 # Constants substituted in the build process
 class BuildVars(object):
     SBINDIR = "@sbindir@"
     BUILDDIR = "@abs_top_builddir@"
     CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"
 
 
 # These values must be kept in sync with include/crm/crm.h
 class CrmExit(object):
     OK                   =    0
     ERROR                =    1
     NOT_INSTALLED        =    5
     NOINPUT              =   66
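     # NOINPUT matches EX_NOINPUT from sysexits.h; NOT_INSTALLED matches the
     # LSB "program is not installed" status code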
 
 
 def is_executable(path):
     """ Check whether a file at a given path is executable. """
 
     try:
         return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
     except OSError:
         return False
 
 
 def diff(file1, file2, **kwargs):
     """ Call diff on two files """
 
     return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
                              "--ignore-blank-lines", file1, file2 ], **kwargs)
 
 
 def sort_file(filename):
     """ Sort a file alphabetically """
 
     with io.open(filename, "rt") as f:
         lines = sorted(f)
     with io.open(filename, "wt") as f:
         f.writelines(lines)
 
 
 def remove_files(filenames):
     """ Remove a list of files """
 
     for filename in filenames:
         try:
             os.remove(filename)
         except OSError:
             pass
 
 
 def normalize(filename):
     """ Remove text from a file that isn't important for comparison """
 
     if not hasattr(normalize, "patterns"):
         normalize.patterns = [
             re.compile(r'crm_feature_set="[^"]*"'),
             re.compile(r'batch-limit="[0-9]*"')
         ]
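         # (crm_feature_set and batch-limit vary across Pacemaker versions and
         # configurations, so they would cause spurious diffs against the
         # stored expected output)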
     if os.path.isfile(filename):
         with io.open(filename, "rt") as f:
             lines = f.readlines()
         with io.open(filename, "wt") as f:
             for line in lines:
                 for pattern in normalize.patterns:
                     line = pattern.sub("", line)
                 f.write(line)
 
 
 def cat(filename, dest=sys.stdout):
     """ Copy a file to a destination file descriptor """
 
     with io.open(filename, "rt") as f:
         shutil.copyfileobj(f, dest)
 
 
 class CtsScheduler(object):
     """ Regression tests for Pacemaker's scheduler """
 
     def _parse_args(self, argv):
         """ Parse command-line arguments """
 
         parser = argparse.ArgumentParser(description=DESC)
 
         parser.add_argument('-V', '--verbose', action='count',
                             help='Display any differences from expected output')
 
         parser.add_argument('--run', metavar='TEST',
                             help=('Run only single specified test (any further '
                                   'arguments will be passed to crm_simulate)'))
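         # (Hypothetical example: "cts-scheduler --run group1 -s" runs only
         # the "group1" test and hands "-s" through to crm_simulate.)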
 
         parser.add_argument('--update', action='store_true',
                             help='Update expected results with actual results')
 
         parser.add_argument('-b', '--binary', metavar='PATH',
                             help='Specify path to crm_simulate')
 
         parser.add_argument('-i', '--io-dir', metavar='PATH',
                             help='Specify path to regression test data directory')
 
         parser.add_argument('-o', '--out-dir', metavar='PATH',
                             help='Specify where intermediate and output files should go')
 
         parser.add_argument('-v', '--valgrind', action='store_true',
                             help='Run all commands under valgrind')
 
         parser.add_argument('--valgrind-dhat', action='store_true',
                             help='Run all commands under valgrind with heap analyzer')
 
         parser.add_argument('--valgrind-skip-output', action='store_true',
                             help='If running under valgrind, do not display output')
 
         parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
                             help='Additional options for command under test')
 
         # argparse can't handle "everything after --run TEST", so grab that
         self.single_test_args = []
         narg = 0
         for arg in argv:
             narg = narg + 1
             if arg == '--run':
                 (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
                 break
 
         self.args = parser.parse_args(argv[1:])
 
     def _error(self, s):
         print("      * ERROR:   %s" % s)
 
     def _failed(self, s):
         print("      * FAILED:  %s" % s)
 
     def _get_valgrind_cmd(self):
         """ Return command arguments needed (or not) to run valgrind """
 
         if self.args.valgrind:
             os.environ['G_SLICE'] = "always-malloc"
             return [
                 "valgrind",
                 "-q",
                 "--gen-suppressions=all",
                 "--time-stamp=yes",
                 "--trace-children=no",
                 "--show-reachable=no",
                 "--leak-check=full",
                 "--num-callers=20",
                 "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
             ]
 
         if self.args.valgrind_dhat:
             os.environ['G_SLICE'] = "always-malloc"
             return [
                 "valgrind",
                 "--tool=exp-dhat",
                 "--time-stamp=yes",
                 "--trace-children=no",
                 "--show-top-n=100",
                 "--num-callers=4"
             ]
 
         return []
 
     def _get_simulator_cmd(self):
         """ Locate the simulation binary """
 
         if self.args.binary is None:
             self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
             if not is_executable(self.args.binary):
                 self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
 
         if not is_executable(self.args.binary):
             # @TODO it would be more pythonic to raise an exception
             self._error("Test binary " + self.args.binary + " not found")
             sys.exit(CrmExit.NOT_INSTALLED)
 
         return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
 
     def set_schema_env(self):
         """ Ensure schema directory environment variable is set, if possible """
 
         try:
             return os.environ['PCMK_schema_directory']
         except KeyError:
             for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
                        BuildVars.CRM_SCHEMA_DIRECTORY ]:
                 if os.path.isdir(d):
                     os.environ['PCMK_schema_directory'] = d
                     return d
             return None
 
     def __init__(self, argv=sys.argv):
 
         self._parse_args(argv)
 
         # Where this executable lives
         self.test_home = os.path.dirname(os.path.realpath(argv[0]))
 
         # Where test data resides
         if self.args.io_dir is None:
             self.args.io_dir = os.path.join(self.test_home, "scheduler")
 
         # Where to store generated files
         if self.args.out_dir is None:
             self.args.out_dir = self.args.io_dir
             self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
         else:
             self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
         os.environ['CIB_shadow_dir'] = self.args.out_dir
         self.failed_file = None
 
         # Single test mode (if requested)
         try:
             # User can give test base name or file name of a test input
             self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
         except (AttributeError, TypeError):
             pass # --run was not specified
 
         self.set_schema_env()
 
         # Arguments needed (or not) to run commands
         self.valgrind_args = self._get_valgrind_cmd()
         self.simulate_args = self._get_simulator_cmd()
 
         # Test counters
         self.num_failed = 0
         self.num_tests = 0
 
     def _compare_files(self, filename1, filename2):
         """ Add any file differences to failed results """
 
         with io.open("/dev/null", "wt") as dev_null:
             if diff(filename1, filename2, stdout=dev_null) != 0:
                 diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
                 self.failed_file.write("\n")
                 return True
         return False
 
     def run_one(self, test_name, test_desc, test_args=[]):
         """ Run one scheduler test """
 
         print("  Test %-25s %s" % ((test_name + ":"), test_desc))
 
         did_fail = False
         self.num_tests = self.num_tests + 1
 
         # Test inputs
         input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
         expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
         dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
         scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
         summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
         stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
 
         # (Intermediate) test outputs
         output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
         dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
         score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
         summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
         stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
         valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
 
         # Common arguments for running test
         test_cmd = []
         if self.valgrind_args:
             test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
         test_cmd = test_cmd + self.simulate_args
 
         # @TODO It would be more pythonic to raise exceptions for errors,
         # then perhaps it would be nice to make a single-test class
 
         # Ensure necessary test inputs exist
         if not os.path.isfile(input_filename):
             self._error("No input")
             self.num_failed = self.num_failed + 1
             return CrmExit.NOINPUT
         if not self.args.update and not os.path.isfile(expected_filename):
             self._error("no stored output")
             return CrmExit.NOINPUT
 
         # Run simulation to generate summary output
         if self.args.run: # Single test mode
             test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
             print(" ".join(test_cmd_full))
         else:
             # @TODO Why isn't test_args added here?
             test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
         with io.open(summary_output_filename, "wt") as f:
             subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
         if self.args.run:
             cat(summary_output_filename)
 
         # Re-run simulation to generate dot, graph, and scores
         test_cmd_full = test_cmd + [
             '-x', input_filename,
             '-D', dot_output_filename,
             '-G', output_filename,
             '-sSQ' ] + test_args
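         # (crm_simulate options used above: -D saves the dot graph, -G saves
         # the transition graph XML, and -sSQ shows allocation scores,
         # simulates the transition, and keeps other output quiet)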
         with io.open(stderr_output_filename, "wt") as f_stderr, \
              io.open(score_output_filename,  "wt") as f_score:
             rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
 
         # Check for test command failure
         if rc != CrmExit.OK:
             self._failed("Test returned: %d" % rc)
             did_fail = True
             print(" ".join(test_cmd_full))
 
         # Check for valgrind errors
         if self.valgrind_args and not self.args.valgrind_skip_output:
             if os.stat(valgrind_output_filename).st_size > 0:
                 self._failed("Valgrind reported errors")
                 did_fail = True
                 cat(valgrind_output_filename)
             remove_files([ valgrind_output_filename ])
 
         # Check for core dump
         if os.path.isfile("core"):
             self._failed("Core-file detected: core." + test_name)
             did_fail = True
             os.rename("core", "%s/core.%s" % (self.test_home, test_name))
 
         # Check any stderr output
         if os.path.isfile(stderr_expected_filename):
             if self._compare_files(stderr_expected_filename, stderr_output_filename):
                 self._failed("stderr changed")
                 did_fail = True
         elif os.stat(stderr_output_filename).st_size > 0:
             self._failed("Output was written to stderr")
             did_fail = True
             cat(stderr_output_filename)
         remove_files([ stderr_output_filename ])
 
         # Check whether output graph exists, and normalize it
         if (not os.path.isfile(output_filename)
             or os.stat(output_filename).st_size == 0):
             self._error("No graph produced")
             did_fail = True
             self.num_failed = self.num_failed + 1
             remove_files([ output_filename ])
             return CrmExit.ERROR
         normalize(output_filename)
 
         # Check whether dot output exists, and sort it
         if (not os.path.isfile(dot_output_filename) or
             os.stat(dot_output_filename).st_size == 0):
             self._error("No dot-file summary produced")
             did_fail = True
             self.num_failed = self.num_failed + 1
             remove_files([ dot_output_filename, output_filename ])
             return CrmExit.ERROR
         with io.open(dot_output_filename, "rt") as f:
             first_line = f.readline() # "digraph" line with opening brace
             lines = f.readlines()
             last_line = lines[-1] # closing brace
             del lines[-1]
             lines = sorted(set(lines)) # unique sort
         with io.open(dot_output_filename, "wt") as f:
             f.write(first_line)
             f.writelines(lines)
             f.write(last_line)
 
         # Check whether score output exists, and sort it
         if (not os.path.isfile(score_output_filename)
             or os.stat(score_output_filename).st_size == 0):
             self._error("No allocation scores produced")
             did_fail = True
             self.num_failed = self.num_failed + 1
             remove_files([ score_output_filename, output_filename ])
             return CrmExit.ERROR
         sort_file(score_output_filename)
 
         if self.args.update:
             shutil.copyfile(output_filename, expected_filename)
             shutil.copyfile(dot_output_filename, dot_expected_filename)
             shutil.copyfile(score_output_filename, scores_filename)
             shutil.copyfile(summary_output_filename, summary_filename)
             print("  Updated expected outputs")
 
         if self._compare_files(summary_filename, summary_output_filename):
             self._failed("summary changed")
             did_fail = True
 
         if self._compare_files(dot_expected_filename, dot_output_filename):
             self._failed("dot-file summary changed")
             did_fail = True
         else:
             remove_files([ dot_output_filename ])
 
         if self._compare_files(expected_filename, output_filename):
             self._failed("xml-file changed")
             did_fail = True
 
         if self._compare_files(scores_filename, score_output_filename):
             self._failed("scores-file changed")
             did_fail = True
 
         remove_files([ output_filename,
                        score_output_filename,
                        summary_output_filename])
 
         if did_fail:
             self.num_failed = self.num_failed + 1
             return CrmExit.ERROR
 
         return CrmExit.OK
 
     def run_all(self):
         """ Run all defined tests """
 
         if platform.architecture()[0] == "64bit":
             TESTS.extend(TESTS_64BIT)
 
         for group in TESTS:
             for test in group:
                 try:
                     args = test[2]
                 except IndexError:
                     args = []
                 self.run_one(test[0], test[1], args)
             print()
 
     def _print_summary(self):
         """ Print a summary of parameters for this test run """
 
         print("Test home is:\t" + self.test_home)
         print("Test binary is:\t" + self.args.binary)
         if 'PCMK_schema_directory' in os.environ:
             print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
         if self.valgrind_args != []:
             print("Activating memory testing with valgrind")
         print()
 
     def _test_results(self):
         if self.num_failed == 0:
             return CrmExit.OK
 
         if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
             if self.args.verbose:
                 self._error("Results of %d failed tests (out of %d):" %
                     (self.num_failed, self.num_tests))
                 cat(self.failed_filename)
             else:
                 self._error("Results of %d failed tests (out of %d) are in %s" %
                     (self.num_failed, self.num_tests, self.failed_filename))
                 self._error("Use -V to display them after running the tests")
         else:
             self._error("%d (of %d) tests failed (no diff results)" %
                 (self.num_failed, self.num_tests))
             if os.path.isfile(self.failed_filename):
                 os.remove(self.failed_filename)
         return CrmExit.ERROR
 
     def run(self):
         """ Run test(s) as specified """
 
         self._print_summary()
 
         # Zero out the error log
         self.failed_file = io.open(self.failed_filename, "wt")
 
         if self.args.run is None:
             print("Performing the following tests from " + self.args.io_dir)
             print()
             self.run_all()
             print()
             self.failed_file.close()
             rc = self._test_results()
         else:
             rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
             self.failed_file.close()
             cat(self.failed_filename)
 
         return rc
 
 
 if __name__ == "__main__":
     sys.exit(CtsScheduler().run())
 
 # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/value-source.dot b/cts/scheduler/value-source.dot
new file mode 100644
index 0000000000..dfb61e9ee8
--- /dev/null
+++ b/cts/scheduler/value-source.dot
@@ -0,0 +1,29 @@
+ digraph "g" {
+"Fencing_monitor_120000 rhel7-1" [ style=bold color="green" fontcolor="black"]
+"Fencing_start_0 rhel7-1" -> "Fencing_monitor_120000 rhel7-1" [ style = bold]
+"Fencing_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"]
+"insane-rsc_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"insane-rsc_start_0 rhel7-4" -> "insane-rsc_monitor_10000 rhel7-4" [ style = bold]
+"insane-rsc_start_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"invert-match_monitor_10000 rhel7-1" [ style=bold color="green" fontcolor="black"]
+"invert-match_start_0 rhel7-1" -> "invert-match_monitor_10000 rhel7-1" [ style = bold]
+"invert-match_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"]
+"meta-rsc_monitor_10000 rhel7-5" [ style=bold color="green" fontcolor="black"]
+"meta-rsc_start_0 rhel7-5" -> "meta-rsc_monitor_10000 rhel7-5" [ style = bold]
+"meta-rsc_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"rsc1_start_0 rhel7-4" -> "rsc1_monitor_10000 rhel7-4" [ style = bold]
+"rsc1_start_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"rsc2_monitor_10000 rhel7-5" [ style=bold color="green" fontcolor="black"]
+"rsc2_start_0 rhel7-5" -> "rsc2_monitor_10000 rhel7-5" [ style = bold]
+"rsc2_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"]
+"set-rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"]
+"set-rsc1_start_0 rhel7-3" -> "set-rsc1_monitor_10000 rhel7-3" [ style = bold]
+"set-rsc1_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"]
+"set-rsc2_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"set-rsc2_start_0 rhel7-4" -> "set-rsc2_monitor_10000 rhel7-4" [ style = bold]
+"set-rsc2_start_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
+"single-rsc_monitor_10000 rhel7-2" [ style=bold color="green" fontcolor="black"]
+"single-rsc_start_0 rhel7-2" -> "single-rsc_monitor_10000 rhel7-2" [ style = bold]
+"single-rsc_start_0 rhel7-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/value-source.exp b/cts/scheduler/value-source.exp
new file mode 100644
index 0000000000..4bf469fda8
--- /dev/null
+++ b/cts/scheduler/value-source.exp
@@ -0,0 +1,200 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1"  transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <rsc_op id="2" operation="monitor" operation_key="Fencing_monitor_120000" on_node="rhel7-1" on_node_uuid="1">
+        <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_interval="120000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000"  key_file="/etc/pacemaker/fence_xvm.key" multicast_address="239.255.100.100" pcmk_host_list="rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="1" operation="start" operation_key="Fencing_start_0" on_node="rhel7-1" on_node_uuid="1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="1" operation="start" operation_key="Fencing_start_0" on_node="rhel7-1" on_node_uuid="1">
+        <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_on_node="rhel7-1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="90000"  key_file="/etc/pacemaker/fence_xvm.key" multicast_address="239.255.100.100" pcmk_host_list="rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="4" operation="monitor" operation_key="rsc1_monitor_10000" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="300"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="3" operation="start" operation_key="rsc1_start_0" on_node="rhel7-4" on_node_uuid="4"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <rsc_op id="3" operation="start" operation_key="rsc1_start_0" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="300"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="6" operation="monitor" operation_key="rsc2_monitor_10000" on_node="rhel7-5" on_node_uuid="5">
+        <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="5" operation="start" operation_key="rsc2_start_0" on_node="rhel7-5" on_node_uuid="5"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="5" operation="start" operation_key="rsc2_start_0" on_node="rhel7-5" on_node_uuid="5">
+        <primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="6">
+    <action_set>
+      <rsc_op id="8" operation="monitor" operation_key="invert-match_monitor_10000" on_node="rhel7-1" on_node_uuid="1">
+        <primitive id="invert-match" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000"  fake="200"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="7" operation="start" operation_key="invert-match_start_0" on_node="rhel7-1" on_node_uuid="1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <rsc_op id="7" operation="start" operation_key="invert-match_start_0" on_node="rhel7-1" on_node_uuid="1">
+        <primitive id="invert-match" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000"  fake="200"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="8">
+    <action_set>
+      <rsc_op id="10" operation="monitor" operation_key="single-rsc_monitor_10000" on_node="rhel7-2" on_node_uuid="2">
+        <primitive id="single-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000"  fake="200"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="9" operation="start" operation_key="single-rsc_start_0" on_node="rhel7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="9">
+    <action_set>
+      <rsc_op id="9" operation="start" operation_key="single-rsc_start_0" on_node="rhel7-2" on_node_uuid="2">
+        <primitive id="single-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000"  fake="200"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="10">
+    <action_set>
+      <rsc_op id="12" operation="monitor" operation_key="set-rsc1_monitor_10000" on_node="rhel7-3" on_node_uuid="3">
+        <primitive id="set-rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000"  fake="300"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="11" operation="start" operation_key="set-rsc1_start_0" on_node="rhel7-3" on_node_uuid="3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="11">
+    <action_set>
+      <rsc_op id="11" operation="start" operation_key="set-rsc1_start_0" on_node="rhel7-3" on_node_uuid="3">
+        <primitive id="set-rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000"  fake="300"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="12">
+    <action_set>
+      <rsc_op id="14" operation="monitor" operation_key="set-rsc2_monitor_10000" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="set-rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="13" operation="start" operation_key="set-rsc2_start_0" on_node="rhel7-4" on_node_uuid="4"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="13">
+    <action_set>
+      <rsc_op id="13" operation="start" operation_key="set-rsc2_start_0" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="set-rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="14">
+    <action_set>
+      <rsc_op id="16" operation="monitor" operation_key="meta-rsc_monitor_10000" on_node="rhel7-5" on_node_uuid="5">
+        <primitive id="meta-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="20000"  fake="500"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="15" operation="start" operation_key="meta-rsc_start_0" on_node="rhel7-5" on_node_uuid="5"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="15">
+    <action_set>
+      <rsc_op id="15" operation="start" operation_key="meta-rsc_start_0" on_node="rhel7-5" on_node_uuid="5">
+        <primitive id="meta-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="20000"  fake="500"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="16">
+    <action_set>
+      <rsc_op id="18" operation="monitor" operation_key="insane-rsc_monitor_10000" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="insane-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="17" operation="start" operation_key="insane-rsc_start_0" on_node="rhel7-4" on_node_uuid="4"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="17">
+    <action_set>
+      <rsc_op id="17" operation="start" operation_key="insane-rsc_start_0" on_node="rhel7-4" on_node_uuid="4">
+        <primitive id="insane-rsc" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000"  fake="400"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+</transition_graph>
diff --git a/cts/scheduler/value-source.scores b/cts/scheduler/value-source.scores
new file mode 100644
index 0000000000..1f781c4dc2
--- /dev/null
+++ b/cts/scheduler/value-source.scores
@@ -0,0 +1,47 @@
+Allocation scores:
+Using the original execution date of: 2020-11-12 21:28:08Z
+pcmk__native_allocate: Fencing allocation score on rhel7-1: 0
+pcmk__native_allocate: Fencing allocation score on rhel7-2: 0
+pcmk__native_allocate: Fencing allocation score on rhel7-3: 0
+pcmk__native_allocate: Fencing allocation score on rhel7-4: 0
+pcmk__native_allocate: Fencing allocation score on rhel7-5: 0
+pcmk__native_allocate: insane-rsc allocation score on rhel7-1: 0
+pcmk__native_allocate: insane-rsc allocation score on rhel7-2: 0
+pcmk__native_allocate: insane-rsc allocation score on rhel7-3: 0
+pcmk__native_allocate: insane-rsc allocation score on rhel7-4: INFINITY
+pcmk__native_allocate: insane-rsc allocation score on rhel7-5: 0
+pcmk__native_allocate: invert-match allocation score on rhel7-1: INFINITY
+pcmk__native_allocate: invert-match allocation score on rhel7-2: 0
+pcmk__native_allocate: invert-match allocation score on rhel7-3: 0
+pcmk__native_allocate: invert-match allocation score on rhel7-4: 0
+pcmk__native_allocate: invert-match allocation score on rhel7-5: 0
+pcmk__native_allocate: meta-rsc allocation score on rhel7-1: 0
+pcmk__native_allocate: meta-rsc allocation score on rhel7-2: 0
+pcmk__native_allocate: meta-rsc allocation score on rhel7-3: 0
+pcmk__native_allocate: meta-rsc allocation score on rhel7-4: INFINITY
+pcmk__native_allocate: meta-rsc allocation score on rhel7-5: INFINITY
+pcmk__native_allocate: rsc1 allocation score on rhel7-1: 0
+pcmk__native_allocate: rsc1 allocation score on rhel7-2: 0
+pcmk__native_allocate: rsc1 allocation score on rhel7-3: 0
+pcmk__native_allocate: rsc1 allocation score on rhel7-4: INFINITY
+pcmk__native_allocate: rsc1 allocation score on rhel7-5: INFINITY
+pcmk__native_allocate: rsc2 allocation score on rhel7-1: 0
+pcmk__native_allocate: rsc2 allocation score on rhel7-2: 0
+pcmk__native_allocate: rsc2 allocation score on rhel7-3: 0
+pcmk__native_allocate: rsc2 allocation score on rhel7-4: 0
+pcmk__native_allocate: rsc2 allocation score on rhel7-5: INFINITY
+pcmk__native_allocate: set-rsc1 allocation score on rhel7-1: 0
+pcmk__native_allocate: set-rsc1 allocation score on rhel7-2: 0
+pcmk__native_allocate: set-rsc1 allocation score on rhel7-3: INFINITY
+pcmk__native_allocate: set-rsc1 allocation score on rhel7-4: 0
+pcmk__native_allocate: set-rsc1 allocation score on rhel7-5: 0
+pcmk__native_allocate: set-rsc2 allocation score on rhel7-1: 0
+pcmk__native_allocate: set-rsc2 allocation score on rhel7-2: 0
+pcmk__native_allocate: set-rsc2 allocation score on rhel7-3: 0
+pcmk__native_allocate: set-rsc2 allocation score on rhel7-4: INFINITY
+pcmk__native_allocate: set-rsc2 allocation score on rhel7-5: 0
+pcmk__native_allocate: single-rsc allocation score on rhel7-1: 0
+pcmk__native_allocate: single-rsc allocation score on rhel7-2: INFINITY
+pcmk__native_allocate: single-rsc allocation score on rhel7-3: 0
+pcmk__native_allocate: single-rsc allocation score on rhel7-4: 0
+pcmk__native_allocate: single-rsc allocation score on rhel7-5: 0
diff --git a/cts/scheduler/value-source.summary b/cts/scheduler/value-source.summary
new file mode 100644
index 0000000000..a9b0392918
--- /dev/null
+++ b/cts/scheduler/value-source.summary
@@ -0,0 +1,60 @@
+Using the original execution date of: 2020-11-12 21:28:08Z
+
+Current cluster status:
+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
+
+ Fencing	(stonith:fence_xvm):	 Stopped
+ rsc1	(ocf::pacemaker:Dummy):	 Stopped
+ rsc2	(ocf::pacemaker:Dummy):	 Stopped
+ invert-match	(ocf::pacemaker:Dummy):	 Stopped
+ single-rsc	(ocf::pacemaker:Dummy):	 Stopped
+ set-rsc1	(ocf::pacemaker:Dummy):	 Stopped
+ set-rsc2	(ocf::pacemaker:Dummy):	 Stopped
+ meta-rsc	(ocf::pacemaker:Dummy):	 Stopped
+ insane-rsc	(ocf::pacemaker:Dummy):	 Stopped
+
+Transition Summary:
+ * Start      Fencing          ( rhel7-1 )  
+ * Start      rsc1             ( rhel7-4 )  
+ * Start      rsc2             ( rhel7-5 )  
+ * Start      invert-match     ( rhel7-1 )  
+ * Start      single-rsc       ( rhel7-2 )  
+ * Start      set-rsc1         ( rhel7-3 )  
+ * Start      set-rsc2         ( rhel7-4 )  
+ * Start      meta-rsc         ( rhel7-5 )  
+ * Start      insane-rsc       ( rhel7-4 )  
+
+Executing cluster transition:
+ * Resource action: Fencing         start on rhel7-1
+ * Resource action: rsc1            start on rhel7-4
+ * Resource action: rsc2            start on rhel7-5
+ * Resource action: invert-match    start on rhel7-1
+ * Resource action: single-rsc      start on rhel7-2
+ * Resource action: set-rsc1        start on rhel7-3
+ * Resource action: set-rsc2        start on rhel7-4
+ * Resource action: meta-rsc        start on rhel7-5
+ * Resource action: insane-rsc      start on rhel7-4
+ * Resource action: Fencing         monitor=120000 on rhel7-1
+ * Resource action: rsc1            monitor=10000 on rhel7-4
+ * Resource action: rsc2            monitor=10000 on rhel7-5
+ * Resource action: invert-match    monitor=10000 on rhel7-1
+ * Resource action: single-rsc      monitor=10000 on rhel7-2
+ * Resource action: set-rsc1        monitor=10000 on rhel7-3
+ * Resource action: set-rsc2        monitor=10000 on rhel7-4
+ * Resource action: meta-rsc        monitor=10000 on rhel7-5
+ * Resource action: insane-rsc      monitor=10000 on rhel7-4
+Using the original execution date of: 2020-11-12 21:28:08Z
+
+Revised cluster status:
+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
+
+ Fencing	(stonith:fence_xvm):	 Started rhel7-1
+ rsc1	(ocf::pacemaker:Dummy):	 Started rhel7-4
+ rsc2	(ocf::pacemaker:Dummy):	 Started rhel7-5
+ invert-match	(ocf::pacemaker:Dummy):	 Started rhel7-1
+ single-rsc	(ocf::pacemaker:Dummy):	 Started rhel7-2
+ set-rsc1	(ocf::pacemaker:Dummy):	 Started rhel7-3
+ set-rsc2	(ocf::pacemaker:Dummy):	 Started rhel7-4
+ meta-rsc	(ocf::pacemaker:Dummy):	 Started rhel7-5
+ insane-rsc	(ocf::pacemaker:Dummy):	 Started rhel7-4
+
diff --git a/cts/scheduler/value-source.xml b/cts/scheduler/value-source.xml
new file mode 100644
index 0000000000..95e7d57206
--- /dev/null
+++ b/cts/scheduler/value-source.xml
@@ -0,0 +1,463 @@
+<cib crm_feature_set="3.6.3" validate-with="pacemaker-3.5" epoch="96" num_updates="0" admin_epoch="1" cib-last-written="Thu Nov 12 15:28:08 2020" update-origin="rhel7-2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="3" execution-date="1605216488">
+  <!-- This is a multiple test of location constraints with node attribute
+       expressions using a value-source of "param" or "meta". The non-fencing
+       resources are all Dummy resources, and their resource parameter "fake"
+       is used in the rules, along with a node attribute also named "fake".
+
+       There are the following 5 nodes with these node attribute values:
+
+        rhel7-1 fake=100 rack=1
+        rhel7-2 fake=200 rack=1
+        rhel7-3 fake=300 rack=1
+        rhel7-4 fake=400 rack=2
+        rhel7-5 fake=500 rack=2
+
+       A location constraint with a rsc-pattern of "^rsc[0-9]$" matches the
+       following resources, and uses a +INFINITY expression where
+       node fake > resource fake:
+         rsc1 fake=300
+         rsc2 fake=400
+       Thus rsc1 should prefer node rhel7-4 or rhel7-5, and rsc2 should
+       prefer rhel7-5.
+
+       A location constraint with an inverted rsc-pattern of "!rsc" matches
+       the following resource, and uses a +INFINITY expression where
+       node fake < resource fake:
+         invert-match fake=200
+       Thus invert-match should prefer rhel7-1.
+
+       A location constraint with rsc="single-rsc" uses a +INFINITY expression
+       where node fake == resource fake, and the resource is:
+         single-rsc fake=200
+       Thus single-rsc should prefer rhel7-2.
+
+       A location constraint with a resource set uses a +INFINITY expression
+       where node fake == resource fake, and the resources in the set are:
+         set-rsc1 fake=300
+         set-rsc2 fake=400
+       Thus set-rsc1 should prefer rhel7-3, and set-rsc2 should prefer rhel7-4.
+
+       To test value-source="meta", a location constraint with a rsc-pattern of
+       "meta-" uses a +INFINITY expression where node rack == resource meta rack,
+       and there is a resource:
+         meta-rsc fake=500 rack=2
+       Thus meta-rsc should prefer rhel7-4 or rhel7-5.
+
+       Finally there is a configuration that is useful solely for verifying
+       that the code can handle it. A location constraint with rsc="insane-rsc"
+       uses a +INFINITY expression where node fake == resource fake. However
+       the resource parameter itself uses a rule with a node attribute
+       expression such that it is 200 on rhel7-1, 400 on rhel7-4, and 100
+       everywhere else. Thus insane-rsc will prefer rhel7-4.
+    -->
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cts-stonith-enabled" name="stonith-enabled" value="1"/>
+        <nvpair id="cts-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
+        <nvpair id="cts-pe-input-series-max" name="pe-input-series-max" value="5000"/>
+        <nvpair id="cts-shutdown-escalation" name="shutdown-escalation" value="5min"/>
+        <nvpair id="cts-batch-limit" name="batch-limit" value="10"/>
+        <nvpair id="cts-dc-deadtime" name="dc-deadtime" value="5s"/>
+        <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="stop"/>
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.5-650.0cdd837.git.el7_9-0cdd837"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1604951742"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="rhel7-1">
+        <instance_attributes id="nodes-1">
+          <nvpair id="nodes-1-fake" name="fake" value="100"/>
+          <nvpair id="nodes-1-rack" name="rack" value="1"/>
+        </instance_attributes>
+      </node>
+      <node id="2" uname="rhel7-2">
+        <instance_attributes id="nodes-2">
+          <nvpair id="nodes-2-fake" name="fake" value="200"/>
+          <nvpair id="nodes-2-rack" name="rack" value="1"/>
+        </instance_attributes>
+      </node>
+      <node id="3" uname="rhel7-3">
+        <instance_attributes id="nodes-3">
+          <nvpair id="nodes-3-fake" name="fake" value="300"/>
+          <nvpair id="nodes-3-rack" name="rack" value="1"/>
+        </instance_attributes>
+      </node>
+      <node id="4" uname="rhel7-4">
+        <instance_attributes id="nodes-4">
+          <nvpair id="nodes-4-fake" name="fake" value="400"/>
+          <nvpair id="nodes-4-rack" name="rack" value="2"/>
+        </instance_attributes>
+      </node>
+      <node id="5" uname="rhel7-5">
+        <instance_attributes id="nodes-5">
+          <nvpair id="nodes-5-fake" name="fake" value="500"/>
+          <nvpair id="nodes-5-rack" name="rack" value="2"/>
+        </instance_attributes>
+      </node>
+    </nodes>
+    <resources>
+      <primitive class="stonith" id="Fencing" type="fence_xvm">
+        <instance_attributes id="Fencing-params">
+          <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5"/>
+        </instance_attributes>
+        <operations>
+          <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="rsc1" provider="pacemaker" type="Dummy">
+        <instance_attributes id="rsc1-instance_attributes" score="10">
+          <nvpair id="rsc1-instance_attributes-fake" name="fake" value="300"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="rsc1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="rsc1-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="rsc1-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="rsc1-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="rsc1-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="rsc2" provider="pacemaker" type="Dummy">
+        <instance_attributes id="rsc2-instance_attributes" score="10">
+          <nvpair id="rsc2-instance_attributes-fake" name="fake" value="400"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc2-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="rsc2-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="rsc2-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="rsc2-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="rsc2-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="rsc2-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="invert-match" provider="pacemaker" type="Dummy">
+        <instance_attributes id="invert-match-instance_attributes" score="10">
+          <nvpair id="invert-match-instance_attributes-fake" name="fake" value="200"/>
+        </instance_attributes>
+        <operations>
+          <op id="invert-match-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="invert-match-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="invert-match-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="invert-match-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="invert-match-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="invert-match-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="single-rsc" provider="pacemaker" type="Dummy">
+        <instance_attributes id="single-rsc-instance_attributes" score="10">
+          <nvpair id="single-rsc-instance_attributes-fake" name="fake" value="200"/>
+        </instance_attributes>
+        <operations>
+          <op id="single-rsc-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="single-rsc-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="single-rsc-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="single-rsc-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="single-rsc-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="single-rsc-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="set-rsc1" provider="pacemaker" type="Dummy">
+        <instance_attributes id="set-rsc1-instance_attributes" score="10">
+          <nvpair id="set-rsc1-instance_attributes-fake" name="fake" value="300"/>
+        </instance_attributes>
+        <operations>
+          <op id="set-rsc1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="set-rsc1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="set-rsc1-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="set-rsc1-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="set-rsc1-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="set-rsc1-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="set-rsc2" provider="pacemaker" type="Dummy">
+        <instance_attributes id="set-rsc2-instance_attributes" score="10">
+          <nvpair id="set-rsc2-instance_attributes-fake" name="fake" value="400"/>
+        </instance_attributes>
+        <operations>
+          <op id="set-rsc2-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="set-rsc2-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="set-rsc2-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="set-rsc2-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="set-rsc2-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="set-rsc2-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
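+      <!-- meta-rsc carries a "rack" meta-attribute; the location-meta
+           constraint matches it against the node attribute of the same name
+           via value-source="meta". -->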
+      <primitive class="ocf" id="meta-rsc" provider="pacemaker" type="Dummy">
+        <meta_attributes id="meta-rsc-meta_attributes">
+          <nvpair id="meta-rsc-meta_attributes-rack" name="rack" value="2"/>
+        </meta_attributes>
+        <instance_attributes id="meta-rsc-instance_attributes" score="10">
+          <nvpair id="meta-rsc-instance_attributes-fake" name="fake" value="500"/>
+        </instance_attributes>
+        <operations>
+          <op id="meta-rsc-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="meta-rsc-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="meta-rsc-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="meta-rsc-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="meta-rsc-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="meta-rsc-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
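+      <!-- insane-rsc's "fake" parameter is node-dependent: the rules below
+           override the default of 100 with 200 on rhel7-1 and 400 on
+           rhel7-4. -->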
+      <primitive class="ocf" id="insane-rsc" provider="pacemaker" type="Dummy">
+        <instance_attributes id="insane-rsc-instance_attributes" score="10">
+          <nvpair id="insane-rsc-instance_attributes-fake" name="fake" value="100"/>
+        </instance_attributes>
+        <instance_attributes id="insane-rsc-instance_attributes2" score="20">
+          <rule id="insane-rsc-rule1" score="INFINITY">
+            <expression id="insane-rsc-rule1-expr1" attribute="#uname" operation="eq" value="rhel7-1"/>
+          </rule>
+          <nvpair id="insane-rsc-instance_attributes2-fake" name="fake" value="200"/>
+        </instance_attributes>
+        <instance_attributes id="insane-rsc-instance_attributes3" score="20">
+          <rule id="insane-rsc-rule2" score="INFINITY">
+            <expression id="insane-rsc-rule2-expr1" attribute="#uname" operation="eq" value="rhel7-4"/>
+          </rule>
+          <nvpair id="insane-rsc-instance_attributes23fake" name="fake" value="400"/>
+        </instance_attributes>
+        <operations>
+          <op id="insane-rsc-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="insane-rsc-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="insane-rsc-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+          <op id="insane-rsc-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="insane-rsc-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="insane-rsc-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+    </resources>
+    <constraints>
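+      <!-- These constraints exercise value-source in rule expressions: each
+           compares a node attribute against a value taken from the resource
+           itself ("param" for instance parameters, "meta" for
+           meta-attributes). Resources are selected by regex
+           (rsc-pattern="^rsc[0-9]$"), inverted regex (rsc-pattern="!rsc"),
+           an explicit rsc, and a resource_set. -->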
+      <rsc_location id="location-rscN" rsc-pattern="^rsc[0-9]$">
+        <rule id="location-rule1" score="INFINITY">
+          <expression id="location-rule1-expr1" attribute="fake" operation="gt" value="fake" value-source="param"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-not-rsc" rsc-pattern="!rsc">
+        <rule id="location-rule2" score="INFINITY">
+          <expression id="location-rule2-expr1" attribute="fake" operation="lt" value="fake" value-source="param"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-single-rsc" rsc="single-rsc">
+        <rule id="location-rule3" score="INFINITY">
+          <expression id="location-rule3-expr1" attribute="fake" operation="eq" value="fake" value-source="param"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-set">
+        <resource_set id="resource-set1">
+          <resource_ref id="set-rsc1"/>
+          <resource_ref id="set-rsc2"/>
+        </resource_set>
+        <rule id="location-rule4" score="INFINITY">
+          <expression id="location-rule4-expr1" attribute="fake" operation="eq" value="fake" value-source="param"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-meta" rsc-pattern="meta-">
+        <rule id="location-rule5" score="INFINITY">
+          <expression id="location-rule5-expr1" attribute="rack" operation="eq" value="rack" value-source="meta"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="location-insane-rsc" rsc="insane-rsc">
+        <rule id="location-rule6" score="INFINITY">
+          <expression id="location-rule6-expr1" attribute="fake" operation="eq" value="fake" value-source="param"/>
+        </rule>
+      </rsc_location>
+    </constraints>
+    <fencing-topology/>
+    <op_defaults>
+      <meta_attributes id="cts-op_defaults-meta">
+        <nvpair id="cts-op_defaults-timeout" name="timeout" value="90s"/>
+      </meta_attributes>
+    </op_defaults>
+    <alerts>
+      <alert id="alert-1" path="/var/lib/pacemaker/notify.sh">
+        <recipient id="alert-1-recipient-1" value="/run/crm/alert.log"/>
+      </alert>
+    </alerts>
+    <rsc_defaults>
+      <meta_attributes id="rsc_defaults-options">
+        <nvpair id="rsc_defaults-options-target-role" name="target-role" value="Started"/>
+      </meta_attributes>
+    </rsc_defaults>
+  </configuration>
+  <status>
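+    <!-- Per-node operation history: rc-code="7" (not running) entries are
+         probe results, while stop operations record where each resource was
+         last active. -->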
+    <node_state id="1" uname="rhel7-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="10:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;10:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="63" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="1" queue-time="0" op-digest="422e90c96b7732222706b322138f43c8"/>
+            <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="2:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;2:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="49" rc-code="0" op-status="0" interval="120000" last-rc-change="1605212720" exec-time="75" queue-time="0" op-digest="75eb18cbd607b3b4911f723ab1c89388"/>
+          </lrm_resource>
+          <lrm_resource id="meta-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="meta-rsc_last_0" operation_key="meta-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="8:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;8:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="57" rc-code="7" op-status="0" interval="0" last-rc-change="1605215239" last-run="1605215239" exec-time="34" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="2:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;2:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="21" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="3:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;3:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="26" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="invert-match" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="invert-match_last_0" operation_key="invert-match_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="13:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;13:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="67" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="59" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+            <lrm_rsc_op id="invert-match_monitor_10000" operation_key="invert-match_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="8:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;8:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="51" rc-code="0" op-status="0" interval="10000" last-rc-change="1605212720" exec-time="52" queue-time="3" op-digest="53e49b6c90e7e71546c273e0f220a321" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="single-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="single-rsc_last_0" operation_key="single-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="5:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;5:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="25" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="insane-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="insane-rsc_last_0" operation_key="insane-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="9:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;9:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="61" rc-code="7" op-status="0" interval="0" last-rc-change="1605216014" last-run="1605216014" exec-time="33" queue-time="0" op-digest="1905d2bf96a4b1c41f38dff8c7b84308" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1905d2bf96a4b1c41f38dff8c7b84308"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc1_last_0" operation_key="set-rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="6:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;6:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc2_last_0" operation_key="set-rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="7:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;7:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-1" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="2" uname="rhel7-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="8:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;8:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="5" queue-time="0" op-digest="422e90c96b7732222706b322138f43c8"/>
+          </lrm_resource>
+          <lrm_resource id="meta-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="meta-rsc_last_0" operation_key="meta-rsc_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="23:27:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;23:27:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1605215447" last-run="1605215447" exec-time="24" queue-time="0" op-digest="4269017c9755b64d3c5687024693ab8d"/>
+          </lrm_resource>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="9:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;9:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="24" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="10:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;10:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="35" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="invert-match" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="invert-match_last_0" operation_key="invert-match_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="11:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;11:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="29" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="single-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="single-rsc_last_0" operation_key="single-rsc_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="14:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;14:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="73" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="70" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+            <lrm_rsc_op id="single-rsc_monitor_10000" operation_key="single-rsc_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="10:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;10:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="40" rc-code="0" op-status="0" interval="10000" last-rc-change="1605212720" exec-time="32" queue-time="0" op-digest="53e49b6c90e7e71546c273e0f220a321" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="insane-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="insane-rsc_last_0" operation_key="insane-rsc_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="26:32:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;26:32:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="69" rc-code="0" op-status="0" interval="0" last-rc-change="1605216258" last-run="1605216258" exec-time="27" queue-time="0" op-digest="1905d2bf96a4b1c41f38dff8c7b84308" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1905d2bf96a4b1c41f38dff8c7b84308"/>
+            <lrm_rsc_op id="insane-rsc_monitor_10000" operation_key="insane-rsc_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="31:29:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;31:29:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="66" rc-code="0" op-status="0" interval="10000" last-rc-change="1605216014" exec-time="21" queue-time="0" op-digest="b87d7fef533f90972e41fe89986ae9ff" op-secure-params="  passwd  " op-secure-digest="1905d2bf96a4b1c41f38dff8c7b84308"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc1_last_0" operation_key="set-rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="13:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;13:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="28" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc2_last_0" operation_key="set-rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="14:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;14:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-2" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="5" uname="rhel7-5" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="5">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="29:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;29:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="1" queue-time="0" op-digest="422e90c96b7732222706b322138f43c8"/>
+          </lrm_resource>
+          <lrm_resource id="meta-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="meta-rsc_last_0" operation_key="meta-rsc_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="17:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;17:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="72" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="78" queue-time="0" op-digest="4269017c9755b64d3c5687024693ab8d" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="4269017c9755b64d3c5687024693ab8d"/>
+            <lrm_rsc_op id="meta-rsc_monitor_10000" operation_key="meta-rsc_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="25:27:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;25:27:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="62" rc-code="0" op-status="0" interval="10000" last-rc-change="1605215447" exec-time="20" queue-time="0" op-digest="ce835727a24a9c27be80f53be9716835" op-secure-params="  passwd  " op-secure-digest="4269017c9755b64d3c5687024693ab8d"/>
+          </lrm_resource>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="30:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;30:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="27" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="12:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;12:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="69" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="85" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+            <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="14:18:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;14:18:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="50" rc-code="0" op-status="0" interval="10000" last-rc-change="1605214609" exec-time="19" queue-time="0" op-digest="86322d62158bad207944738f0a1bac13" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="invert-match" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="invert-match_last_0" operation_key="invert-match_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="32:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;32:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="29" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="single-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="single-rsc_last_0" operation_key="single-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="33:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;33:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="28" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="insane-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="insane-rsc_last_0" operation_key="insane-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="13:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;13:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="67" rc-code="7" op-status="0" interval="0" last-rc-change="1605216014" last-run="1605216014" exec-time="32" queue-time="0" op-digest="1905d2bf96a4b1c41f38dff8c7b84308" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1905d2bf96a4b1c41f38dff8c7b84308"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc1_last_0" operation_key="set-rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="34:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;34:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="35" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc2_last_0" operation_key="set-rsc2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="20:19:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;20:19:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-5" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1605214850" last-run="1605214850" exec-time="22" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="3" uname="rhel7-3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="3">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="15:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;15:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="9" queue-time="0" op-digest="422e90c96b7732222706b322138f43c8"/>
+          </lrm_resource>
+          <lrm_resource id="meta-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="meta-rsc_last_0" operation_key="meta-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="10:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;10:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="51" rc-code="7" op-status="0" interval="0" last-rc-change="1605215239" last-run="1605215239" exec-time="68" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="16:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;16:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="32" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="17:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;17:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="20" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="invert-match" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="invert-match_last_0" operation_key="invert-match_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="18:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;18:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="33" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="single-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="single-rsc_last_0" operation_key="single-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="19:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;19:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="31" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="insane-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="insane-rsc_last_0" operation_key="insane-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="11:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;11:29:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="55" rc-code="7" op-status="0" interval="0" last-rc-change="1605216014" last-run="1605216014" exec-time="46" queue-time="0" op-digest="1905d2bf96a4b1c41f38dff8c7b84308" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1905d2bf96a4b1c41f38dff8c7b84308"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc1_last_0" operation_key="set-rsc1_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="15:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;15:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="110" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+            <lrm_rsc_op id="set-rsc1_monitor_10000" operation_key="set-rsc1_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="12:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;12:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="46" rc-code="0" op-status="0" interval="10000" last-rc-change="1605212720" exec-time="34" queue-time="0" op-digest="72d8b1ceea915fac5b98d7c16b3cb81a" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc2_last_0" operation_key="set-rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="21:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;21:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-3" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="33" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="4" uname="rhel7-4" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="4">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="22:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;22:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="7" queue-time="0" op-digest="422e90c96b7732222706b322138f43c8"/>
+          </lrm_resource>
+          <lrm_resource id="meta-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="meta-rsc_last_0" operation_key="meta-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="11:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;11:23:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="70" rc-code="7" op-status="0" interval="0" last-rc-change="1605215239" last-run="1605215239" exec-time="36" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="11:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;11:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="80" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="77" queue-time="0" op-digest="740d07f67c20fca411b03996e601119e" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+            <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="4:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;4:15:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="54" rc-code="0" op-status="0" interval="10000" last-rc-change="1605212720" exec-time="56" queue-time="0" op-digest="72d8b1ceea915fac5b98d7c16b3cb81a" op-secure-params="  passwd  " op-secure-digest="740d07f67c20fca411b03996e601119e"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="12:18:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;12:18:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="60" rc-code="0" op-status="0" interval="0" last-rc-change="1605214609" last-run="1605214609" exec-time="29" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="invert-match" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="invert-match_last_0" operation_key="invert-match_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="25:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;25:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="22" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="69" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="single-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="single-rsc_last_0" operation_key="single-rsc_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="26:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;26:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="26" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="39" queue-time="0" op-digest="1bd01c2c51e0e8a776ac0e84d5715d70" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="1bd01c2c51e0e8a776ac0e84d5715d70"/>
+          </lrm_resource>
+          <lrm_resource id="insane-rsc" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="insane-rsc_last_0" operation_key="insane-rsc_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="18:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;18:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="88" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="39" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+            <lrm_rsc_op id="insane-rsc_monitor_10000" operation_key="insane-rsc_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="28:32:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;28:32:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="77" rc-code="0" op-status="0" interval="10000" last-rc-change="1605216258" exec-time="21" queue-time="0" op-digest="86322d62158bad207944738f0a1bac13" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc1" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc1_last_0" operation_key="set-rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="27:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:7;27:0:7:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="30" rc-code="7" op-status="0" interval="0" last-rc-change="1605205770" last-run="1605205770" exec-time="28" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="set-rsc2" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="set-rsc2_last_0" operation_key="set-rsc2_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.6.3" transition-key="16:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;16:33:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="83" rc-code="0" op-status="0" interval="0" last-rc-change="1605216481" last-run="1605216481" exec-time="71" queue-time="0" op-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84" op-force-restart="  envfile op_sleep passwd state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+            <lrm_rsc_op id="set-rsc2_monitor_10000" operation_key="set-rsc2_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.6.3" transition-key="22:19:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" transition-magic="0:0;22:19:0:2b51cdb9-fb8b-43dd-905e-e7c0472fdaca" exit-reason="" on_node="rhel7-4" call-id="65" rc-code="0" op-status="0" interval="10000" last-rc-change="1605214850" exec-time="19" queue-time="0" op-digest="86322d62158bad207944738f0a1bac13" op-secure-params="  passwd  " op-secure-digest="e4a0cc96ccaaaa02bea1e0be9c4dee84"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>
diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c
index 8be8e18fad..423b54d687 100644
--- a/daemons/execd/cts-exec-helper.c
+++ b/daemons/execd/cts-exec-helper.c
@@ -1,692 +1,690 @@
 /*
  * Copyright 2012-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 #include <unistd.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 #include <crm/common/mainloop.h>
 
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/cib.h>
 #include <crm/lrmd.h>
 
 static pcmk__cli_option_t long_options[] = {
     // long option, argument type, storage, short option, description, flags
     {
         "help", no_argument, NULL, '?',
         NULL, pcmk__option_default
     },
     {
         "verbose", no_argument, NULL, 'V',
         "\t\tPrint out logs and events to screen", pcmk__option_default
     },
     {
         "quiet", no_argument, NULL, 'Q',
         "\t\tSuppress all output to screen", pcmk__option_default
     },
     {
         "tls", no_argument, NULL, 'S',
         "\t\tUse TLS backend for local connection", pcmk__option_default
     },
     {
         "listen", required_argument, NULL, 'l',
         "\tListen for a specific event string", pcmk__option_default
     },
     {
         "api-call", required_argument, NULL, 'c',
         "\tDirectly relates to executor API functions", pcmk__option_default
     },
     {
         "no-wait", no_argument, NULL, 'w',
         "\tMake api call and do not wait for result", pcmk__option_default
     },
     {
         "is-running", no_argument, NULL, 'R',
         "\tDetermine if a resource is registered and running",
         pcmk__option_default
     },
     {
         "notify-orig", no_argument, NULL, 'n',
         "\tOnly notify this client the results of an API action",
         pcmk__option_default
     },
     {
         "notify-changes", no_argument, NULL, 'o',
         "\tOnly notify client changes to recurring operations",
         pcmk__option_default
     },
     {
         "-spacer-", no_argument, NULL, '-',
         "\nParameters for api-call option", pcmk__option_default
     },
     {
         "action", required_argument, NULL, 'a',
         NULL, pcmk__option_default
     },
     {
         "rsc-id", required_argument, NULL, 'r',
         NULL, pcmk__option_default
     },
     {
         "cancel-call-id", required_argument, NULL, 'x',
         NULL, pcmk__option_default
     },
     {
         "provider", required_argument, NULL, 'P',
         NULL, pcmk__option_default
     },
     {
         "class", required_argument, NULL, 'C',
         NULL, pcmk__option_default
     },
     {
         "type", required_argument, NULL, 'T',
         NULL, pcmk__option_default
     },
     {
         "interval", required_argument, NULL, 'i',
         NULL, pcmk__option_default
     },
     {
         "timeout", required_argument, NULL, 't',
         NULL, pcmk__option_default
     },
     {
         "start-delay", required_argument, NULL, 's',
         NULL, pcmk__option_default
     },
     {
         "param-key", required_argument, NULL, 'k',
         NULL, pcmk__option_default
     },
     {
         "param-val", required_argument, NULL, 'v',
         NULL, pcmk__option_default
     },
     {
         "-spacer-", no_argument, NULL, '-',
         NULL, pcmk__option_default
     },
     { 0, 0, 0, 0 }
 };
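 
 /* Illustrative invocations of this helper, using the options above with a
  * hypothetical resource id "my-rsc" (the CTS tests' exact usage may differ):
  *
  *   cts-exec-helper -c register_rsc -r my-rsc -C ocf -P pacemaker -T Dummy
  *   cts-exec-helper -c exec -r my-rsc -a monitor -i 10000 -t 5000
  *   cts-exec-helper -c metadata -C ocf -P pacemaker -T Dummy
  */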
 
 static cib_t *cib_conn = NULL;
 static int exec_call_id = 0;
 static int exec_call_opts = 0;
 static gboolean start_test(gpointer user_data);
 static void try_connect(void);
 
 static struct {
     int verbose;
     int quiet;
     guint interval_ms;
     int timeout;
     int start_delay;
     int cancel_call_id;
     int no_wait;
     int is_running;
     int no_connect;
     const char *api_call;
     const char *rsc_id;
     const char *provider;
     const char *class;
     const char *type;
     const char *action;
     const char *listen;
     lrmd_key_value_t *params;
 } options;
 
 static GMainLoop *mainloop = NULL;
 static lrmd_t *lrmd_conn = NULL;
 
 static char event_buf_v0[1024];
 
 static void
 test_exit(crm_exit_t exit_code)
 {
     lrmd_api_delete(lrmd_conn);
     crm_exit(exit_code);
 }
 
 /* Evaluate result unless --quiet was given; do/while (0) keeps the macro a
  * single statement that is safe in if/else bodies. */
 #define print_result(result)    \
     do {                        \
         if (!options.quiet) {   \
             result;             \
         }                       \
     } while (0)
 
 /* Format event into event_buf_v0 and log it; read_events() compares this
  * buffer against the --listen string to detect the expected event. */
 #define report_event(event)                                             \
     do {                                                                \
         snprintf(event_buf_v0, sizeof(event_buf_v0),                    \
                  "NEW_EVENT event_type:%s rsc_id:%s action:%s rc:%s op_status:%s", \
                  lrmd_event_type2str(event->type),                      \
                  event->rsc_id,                                         \
                  event->op_type ? event->op_type : "none",              \
                  services_ocf_exitcode_str(event->rc),                  \
                  services_lrm_status_str(event->op_status));            \
         crm_info("%s", event_buf_v0);                                   \
     } while (0)
 
 static void
 test_shutdown(int nsig)
 {
     lrmd_api_delete(lrmd_conn);
     lrmd_conn = NULL;
 }
 
 static void
 read_events(lrmd_event_data_t * event)
 {
     report_event(event);
     if (options.listen) {
         if (pcmk__str_eq(options.listen, event_buf_v0, pcmk__str_casei)) {
             print_result(printf("LISTEN EVENT SUCCESSFUL\n"));
             test_exit(CRM_EX_OK);
         }
     }
 
     if (exec_call_id && (event->call_id == exec_call_id)) {
         if (event->op_status == 0 && event->rc == 0) {
             print_result(printf("API-CALL SUCCESSFUL for 'exec'\n"));
         } else {
             print_result(printf("API-CALL FAILURE for 'exec', rc:%d lrmd_op_status:%s\n",
                                 event->rc, services_lrm_status_str(event->op_status)));
             test_exit(CRM_EX_ERROR);
         }
 
         if (!options.listen) {
             test_exit(CRM_EX_OK);
         }
     }
 }
 
 static gboolean
 timeout_err(gpointer data)
 {
     print_result(printf("LISTEN EVENT FAILURE - timeout occurred, never found.\n"));
     test_exit(CRM_EX_TIMEOUT);
     return FALSE;
 }
 
 static void
 connection_events(lrmd_event_data_t * event)
 {
     int rc = event->connection_rc;
 
     if (event->type != lrmd_event_connect) {
         /* ignore */
         return;
     }
 
     if (!rc) {
         crm_info("Executor client connection established");
         start_test(NULL);
         return;
     } else {
         crm_notice("Executor client connection failed; retrying");
         sleep(1);
         try_connect();
     }
 }
 
 static void
 try_connect(void)
 {
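     /* num_tries is static so the ten-attempt budget is shared across the
      * re-entries from connection_events() instead of resetting each call. */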
     int tries = 10;
     static int num_tries = 0;
     int rc = 0;
 
     lrmd_conn->cmds->set_callback(lrmd_conn, connection_events);
     for (; num_tries < tries; num_tries++) {
         rc = lrmd_conn->cmds->connect_async(lrmd_conn, "pacemaker-execd", 3000);
 
         if (!rc) {
             return;             /* we'll hear back in async callback */
         }
         sleep(1);
     }
 
     print_result(printf("API CONNECTION FAILURE\n"));
     test_exit(CRM_EX_ERROR);
 }
 
 static gboolean
 start_test(gpointer user_data)
 {
     int rc = 0;
 
     if (!options.no_connect) {
         if (!lrmd_conn->cmds->is_connected(lrmd_conn)) {
             try_connect();
             /* async connect in progress; connection_events() will call
              * back into this function once the result is known */
             return FALSE;
         }
     }
     lrmd_conn->cmds->set_callback(lrmd_conn, read_events);
 
     if (options.timeout) {
         g_timeout_add(options.timeout, timeout_err, NULL);
     }
 
     if (!options.api_call) {
         return FALSE;
     }
 
     /* Dispatch on the --api-call argument. Most calls below complete
      * synchronously; "exec" is asynchronous, and its result is reported
      * later by read_events() using the call id saved in exec_call_id. */
     if (pcmk__str_eq(options.api_call, "exec", pcmk__str_casei)) {
         rc = lrmd_conn->cmds->exec(lrmd_conn,
                                    options.rsc_id,
                                    options.action,
                                    NULL,
                                    options.interval_ms,
                                    options.timeout,
                                    options.start_delay, exec_call_opts, options.params);
 
         if (rc > 0) {
             exec_call_id = rc;
             print_result(printf("API-CALL 'exec' action pending, waiting on response\n"));
         }
 
     } else if (pcmk__str_eq(options.api_call, "register_rsc", pcmk__str_casei)) {
         rc = lrmd_conn->cmds->register_rsc(lrmd_conn,
                                            options.rsc_id,
                                            options.class, options.provider, options.type, 0);
     } else if (pcmk__str_eq(options.api_call, "get_rsc_info", pcmk__str_casei)) {
         lrmd_rsc_info_t *rsc_info;
 
         rsc_info = lrmd_conn->cmds->get_rsc_info(lrmd_conn, options.rsc_id, 0);
 
         if (rsc_info) {
             print_result(printf("RSC_INFO: id:%s class:%s provider:%s type:%s\n",
                                 rsc_info->id, rsc_info->standard,
                                 rsc_info->provider ? rsc_info->provider : "<none>",
                                 rsc_info->type));
             lrmd_free_rsc_info(rsc_info);
             rc = pcmk_ok;
         } else {
             rc = -1;
         }
     } else if (pcmk__str_eq(options.api_call, "unregister_rsc", pcmk__str_casei)) {
         rc = lrmd_conn->cmds->unregister_rsc(lrmd_conn, options.rsc_id, 0);
     } else if (pcmk__str_eq(options.api_call, "cancel", pcmk__str_casei)) {
         rc = lrmd_conn->cmds->cancel(lrmd_conn, options.rsc_id, options.action,
                                      options.interval_ms);
     } else if (pcmk__str_eq(options.api_call, "metadata", pcmk__str_casei)) {
         char *output = NULL;
 
         rc = lrmd_conn->cmds->get_metadata(lrmd_conn,
                                            options.class,
                                            options.provider, options.type, &output, 0);
         if (rc == pcmk_ok) {
             print_result(printf("%s", output));
             free(output);
         }
     } else if (pcmk__str_eq(options.api_call, "list_agents", pcmk__str_casei)) {
         lrmd_list_t *list = NULL;
         lrmd_list_t *iter = NULL;
 
         rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, options.class, options.provider);
 
         if (rc > 0) {
             print_result(printf("%d agents found\n", rc));
             for (iter = list; iter != NULL; iter = iter->next) {
                 print_result(printf("%s\n", iter->val));
             }
             lrmd_list_freeall(list);
             rc = 0;
         } else {
             print_result(printf("API_CALL FAILURE - no agents found\n"));
             rc = -1;
         }
     } else if (pcmk__str_eq(options.api_call, "list_ocf_providers", pcmk__str_casei)) {
         lrmd_list_t *list = NULL;
         lrmd_list_t *iter = NULL;
 
         rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, options.type, &list);
 
         if (rc > 0) {
             print_result(printf("%d providers found\n", rc));
             for (iter = list; iter != NULL; iter = iter->next) {
                 print_result(printf("%s\n", iter->val));
             }
             lrmd_list_freeall(list);
             rc = 0;
         } else {
             print_result(printf("API_CALL FAILURE - no providers found\n"));
             rc = -1;
         }
 
     } else if (pcmk__str_eq(options.api_call, "list_standards", pcmk__str_casei)) {
         lrmd_list_t *list = NULL;
         lrmd_list_t *iter = NULL;
 
         rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
 
         if (rc > 0) {
             print_result(printf("%d standards found\n", rc));
             for (iter = list; iter != NULL; iter = iter->next) {
                 print_result(printf("%s\n", iter->val));
             }
             lrmd_list_freeall(list);
             rc = 0;
         } else {
             print_result(printf("API_CALL FAILURE - no standards found\n"));
             rc = -1;
         }
 
     } else if (pcmk__str_eq(options.api_call, "get_recurring_ops", pcmk__str_casei)) {
         GList *op_list = NULL;
         GList *op_item = NULL;
         rc = lrmd_conn->cmds->get_recurring_ops(lrmd_conn, options.rsc_id, 0, 0,
                                                 &op_list);
 
         for (op_item = op_list; op_item != NULL; op_item = op_item->next) {
             lrmd_op_info_t *op_info = op_item->data;
 
             print_result(printf("RECURRING_OP: %s_%s_%s timeout=%sms\n",
                                 op_info->rsc_id, op_info->action,
                                 op_info->interval_ms_s, op_info->timeout_ms_s));
             lrmd_free_op_info(op_info);
         }
         g_list_free(op_list);
 
     } else if (options.api_call) {
         print_result(printf("API-CALL FAILURE unknown api_call '%s'\n", options.api_call));
         test_exit(CRM_EX_ERROR);
     }
 
     if (rc < 0) {
         print_result(printf("API-CALL FAILURE for '%s' api_rc:%d\n", options.api_call, rc));
         test_exit(CRM_EX_ERROR);
     }
 
     if (options.api_call && rc == pcmk_ok) {
         print_result(printf("API-CALL SUCCESSFUL for '%s'\n", options.api_call));
         if (!options.listen) {
             test_exit(CRM_EX_OK);
         }
     }
 
     if (options.no_wait) {
         /* just make the call and exit regardless of anything else. */
         test_exit(CRM_EX_OK);
     }
 
     return 0;
 }
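 
 /* Illustrative invocations of the api_call dispatch above (resource and
  * agent names here are hypothetical examples, not fixed test names):
  *
  *   cts-exec-helper -c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy
  *   cts-exec-helper -c exec -r test_rsc -a monitor -t 3000
  *   cts-exec-helper -c metadata -C ocf -P pacemaker -T Dummy
  */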
 
 static int
 generate_params(void)
 {
     int rc = 0;
     pe_working_set_t *data_set = NULL;
     xmlNode *cib_xml_copy = NULL;
     pe_resource_t *rsc = NULL;
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTableIter iter;
 
     if (options.params) {
         return 0;
     }
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         crm_crit("Could not allocate working set");
         return -ENOMEM;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
 
     cib_conn = cib_new();
     rc = cib_conn->cmds->signon(cib_conn, "cts-exec-helper", cib_query);
     if (rc != pcmk_ok) {
         crm_err("Could not connect to the CIB manager: %s", pcmk_strerror(rc));
         rc = -1;
         goto param_gen_bail;
     }
 
     rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     if (rc != pcmk_ok) {
         crm_err("Error retrieving cib copy: %s (%d)", pcmk_strerror(rc), rc);
         goto param_gen_bail;
 
     } else if (cib_xml_copy == NULL) {
         rc = -ENODATA;
         crm_err("Error retrieving cib copy: %s (%d)", pcmk_strerror(rc), rc);
         goto param_gen_bail;
     }
 
     if (cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) {
         crm_err("Error updating cib configuration");
         rc = -1;
         goto param_gen_bail;
     }
 
     data_set->input = cib_xml_copy;
     data_set->now = crm_time_new(NULL);
 
     cluster_status(data_set);
     if (options.rsc_id) {
         rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
                                           pe_find_renamed|pe_find_any);
     }
 
     if (!rsc) {
         crm_err("Resource does not exist in config");
         rc = -1;
         goto param_gen_bail;
     }
 
-    params = crm_str_table_new();
+    params = pe_rsc_params(rsc, NULL, data_set);
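+    /* pe_rsc_params() returns a parameter table cached on the resource
+     * itself (note the g_hash_table_destroy(params) removed below), so this
+     * caller only reads the table and must not free it.
+     */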
     meta = crm_str_table_new();
 
-    get_rsc_attributes(params, rsc, NULL, data_set);
     get_meta_attributes(meta, rsc, NULL, data_set);
 
-    if (params) {
+    if (params != NULL) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             options.params = lrmd_key_value_add(options.params, key, value);
         }
-        g_hash_table_destroy(params);
     }
 
     if (meta) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             options.params = lrmd_key_value_add(options.params, crm_name, value);
             free(crm_name);
         }
         g_hash_table_destroy(meta);
     }
 
   param_gen_bail:
     pe_free_working_set(data_set);
     return rc;
 }
 
 int
 main(int argc, char **argv)
 {
     int option_index = 0;
     int argerr = 0;
     int flag;
     char *key = NULL;
     char *val = NULL;
     gboolean use_tls = FALSE;
     crm_trigger_t *trig;
 
     crm_log_cli_init("cts-exec-helper");
     pcmk__set_cli_options(NULL, "<mode> [options]", long_options,
                           "inject commands into the Pacemaker executor, "
                           "and watch for events");
 
     while (1) {
         flag = pcmk__next_cli_option(argc, argv, &option_index, NULL);
         if (flag == -1)
             break;
 
         switch (flag) {
             case '?':
                 pcmk__cli_help(flag, CRM_EX_OK);
                 break;
             case 'V':
                 ++options.verbose;
                 crm_bump_log_level(argc, argv);
                 break;
             case 'Q':
                 options.quiet = 1;
                 options.verbose = 0;
                 break;
             case 'l':
                 options.listen = optarg;
                 break;
             case 'w':
                 options.no_wait = 1;
                 break;
             case 'R':
                 options.is_running = 1;
                 break;
             case 'n':
                 exec_call_opts = lrmd_opt_notify_orig_only;
                 break;
             case 'o':
                 exec_call_opts = lrmd_opt_notify_changes_only;
                 break;
             case 'c':
                 options.api_call = optarg;
                 break;
             case 'a':
                 options.action = optarg;
                 break;
             case 'r':
                 options.rsc_id = optarg;
                 break;
             case 'x':
                 if(optarg) {
                     options.cancel_call_id = atoi(optarg);
                 }
                 break;
             case 'P':
                 options.provider = optarg;
                 break;
             case 'C':
                 options.class = optarg;
                 break;
             case 'T':
                 options.type = optarg;
                 break;
             case 'i':
                 if(optarg) {
                     options.interval_ms = crm_parse_interval_spec(optarg);
                 }
                 break;
             case 't':
                 if(optarg) {
                     options.timeout = atoi(optarg);
                 }
                 break;
             case 's':
                 if(optarg) {
                     options.start_delay = atoi(optarg);
                 }
                 break;
             case 'k':
                 key = optarg;
                 if (key && val) {
                     options.params = lrmd_key_value_add(options.params, key, val);
                     key = val = NULL;
                 }
                 break;
             case 'v':
                 val = optarg;
                 if (key && val) {
                     options.params = lrmd_key_value_add(options.params, key, val);
                     key = val = NULL;
                 }
                 break;
             case 'S':
                 use_tls = TRUE;
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         pcmk__cli_help('?', CRM_EX_USAGE);
     }
 
     if (!options.listen && pcmk__strcase_any_of(options.api_call, "metadata", "list_agents",
                                                 "list_standards", "list_ocf_providers", NULL)) {
         options.no_connect = 1;
     }
 
     crm_log_init(NULL, LOG_INFO, TRUE, (options.verbose? TRUE : FALSE),
                  argc, argv, FALSE);
 
     if (options.is_running) {
         if (!options.timeout) {
             options.timeout = 30000;
         }
         options.interval_ms = 0;
         if (!options.rsc_id) {
             crm_err("rsc-id must be given when is-running is used");
             test_exit(CRM_EX_ERROR);
         }
 
         if (generate_params()) {
             print_result(printf("Failed to retrieve rsc parameters from cib, "
                                 "cannot determine if rsc is running.\n"));
             test_exit(CRM_EX_ERROR);
         }
         options.api_call = "exec";
         options.action = "monitor";
         exec_call_opts = lrmd_opt_notify_orig_only;
     }
 
     /* If we can't perform an api_call or listen for events,
      * there is nothing to do */
     if (!options.api_call && !options.listen) {
         crm_err("Nothing to be done.  Please specify 'api-call' and/or 'listen'");
         return CRM_EX_OK;
     }
 
     if (use_tls) {
         lrmd_conn = lrmd_remote_api_new(NULL, "localhost", 0);
     } else {
         lrmd_conn = lrmd_api_new();
     }
     trig = mainloop_add_trigger(G_PRIORITY_HIGH, start_test, NULL);
     mainloop_set_trigger(trig);
     mainloop_add_signal(SIGTERM, test_shutdown);
 
     crm_info("Starting");
     mainloop = g_main_loop_new(NULL, FALSE);
     g_main_loop_run(mainloop);
 
     if (cib_conn != NULL) {
         cib_conn->cmds->signoff(cib_conn);
         cib_delete(cib_conn);
     }
 
     test_exit(CRM_EX_OK);
     return CRM_EX_OK;
 }
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 69c29a74bf..5390d66720 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,1549 +1,1550 @@
 /*
  * Copyright 2009-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>  // PRIu32, PRIx32
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/mainloop.h>
 
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 #include <pacemaker-fenced.h>
 
 char *stonith_our_uname = NULL;
 char *stonith_our_uuid = NULL;
 long stonith_watchdog_timeout_ms = 0;
 
 static GMainLoop *mainloop = NULL;
 
 gboolean stand_alone = FALSE;
 static gboolean no_cib_connect = FALSE;
 static gboolean stonith_shutdown_flag = FALSE;
 
 static qb_ipcs_service_t *ipcs = NULL;
 static xmlNode *local_cib = NULL;
 static pe_working_set_t *fenced_data_set = NULL;
 
 static cib_t *cib_api = NULL;
 
 static void stonith_shutdown(int nsig);
 static void stonith_cleanup(void);
 
 static int32_t
 st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     if (stonith_shutdown_flag) {
         crm_info("Ignoring new client [%d] during shutdown",
                  pcmk__client_pid(c));
         return -EPERM;
     }
 
     if (pcmk__new_client(c, uid, gid) == NULL) {
         return -EIO;
     }
     return 0;
 }
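 
 /* With libqb IPC, returning a negative errno from the connection_accept
  * handler (as with -EPERM and -EIO above) rejects the incoming connection;
  * returning 0 accepts it.
  */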
 
 /* Exit code means? */
 static int32_t
 st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     int call_options = 0;
     xmlNode *request = NULL;
     pcmk__client_t *c = pcmk__find_client(qbc);
     const char *op = NULL;
 
     if (c == NULL) {
         crm_info("Invalid client: %p", qbc);
         return 0;
     }
 
     request = pcmk__client_data2xml(c, data, &id, &flags);
     if (request == NULL) {
         pcmk__ipc_send_ack(c, id, flags, "nack", CRM_EX_PROTOCOL);
         return 0;
     }
 
 
     op = crm_element_value(request, F_CRM_TASK);
     if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) {
         crm_xml_add(request, F_TYPE, T_STONITH_NG);
         crm_xml_add(request, F_STONITH_OPERATION, op);
         crm_xml_add(request, F_STONITH_CLIENTID, c->id);
         crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
         crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
         send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
         free_xml(request);
         return 0;
     }
 
     if (c->name == NULL) {
         const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
 
         if (value == NULL) {
             value = "unknown";
         }
         c->name = crm_strdup_printf("%s.%u", value, c->pid);
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_trace("Flags 0x%08" PRIx32 "/0x%08x for command %" PRIu32
               " from client %s", flags, call_options, id, pcmk__client_name(c));
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(flags & crm_ipc_client_response);
         CRM_LOG_ASSERT(c->request_id == 0);     /* This means the client has two synchronous events in-flight */
         c->request_id = id;     /* Reply only to the last one */
     }
 
     crm_xml_add(request, F_STONITH_CLIENTID, c->id);
     crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
     crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
     stonith_command(c, id, flags, request, NULL);
 
     free_xml(request);
     return 0;
 }
 
 /* Error code means? */
 static int32_t
 st_ipc_closed(qb_ipcs_connection_t * c)
 {
     pcmk__client_t *client = pcmk__find_client(c);
 
     if (client == NULL) {
         return 0;
     }
 
     crm_trace("Connection %p closed", c);
     pcmk__free_client(client);
 
     /* 0 means: yes, go ahead and destroy the connection */
     return 0;
 }
 
 static void
 st_ipc_destroy(qb_ipcs_connection_t * c)
 {
     crm_trace("Connection %p destroyed", c);
     st_ipc_closed(c);
 }
 
 static void
 stonith_peer_callback(xmlNode * msg, void *private_data)
 {
     const char *remote_peer = crm_element_value(msg, F_ORIG);
     const char *op = crm_element_value(msg, F_STONITH_OPERATION);
 
     if (pcmk__str_eq(op, "poke", pcmk__str_none)) {
         return;
     }
 
     crm_log_xml_trace(msg, "Peer[inbound]");
     stonith_command(NULL, 0, 0, msg, remote_peer);
 }
 
 #if SUPPORT_COROSYNC
 static void
 stonith_peer_ais_callback(cpg_handle_t handle,
                           const struct cpg_name *groupName,
                           uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     xmlNode *xml = NULL;
     const char *from = NULL;
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     if (kind == crm_class_cluster) {
         xml = string2xml(data);
         if (xml == NULL) {
             crm_err("Invalid XML: '%.120s'", data);
             free(data);
             return;
         }
         crm_xml_add(xml, F_ORIG, from);
         /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
         stonith_peer_callback(xml, NULL);
     }
 
     free_xml(xml);
     free(data);
     return;
 }
 
 static void
 stonith_peer_cs_destroy(gpointer user_data)
 {
     crm_crit("Lost connection to cluster layer, shutting down");
     stonith_shutdown(0);
 }
 #endif
 
 void
 do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer)
 {
     /* send callback to originating child */
     pcmk__client_t *client_obj = NULL;
     int local_rc = pcmk_rc_ok;
 
     crm_trace("Sending response");
     client_obj = pcmk__find_client_by_id(client_id);
 
     crm_trace("Sending callback to request originator");
     if (client_obj == NULL) {
         local_rc = EPROTO;
         crm_trace("No client to send the response to. F_STONITH_CLIENTID not set.");
 
     } else {
         int rid = 0;
 
         if (sync_reply) {
             CRM_LOG_ASSERT(client_obj->request_id);
 
             rid = client_obj->request_id;
             client_obj->request_id = 0;
 
             crm_trace("Sending response %d to client %s%s",
                       rid, pcmk__client_name(client_obj),
                       (from_peer? " (originator of delegated request)" : ""));
 
         } else {
             crm_trace("Sending an event to client %s%s",
                       pcmk__client_name(client_obj),
                       (from_peer? " (originator of delegated request)" : ""));
         }
 
         local_rc = pcmk__ipc_send_xml(client_obj, rid, notify_src,
                                       (sync_reply? crm_ipc_flags_none
                                        : crm_ipc_server_event));
     }
 
     if ((local_rc != pcmk_rc_ok) && (client_obj != NULL)) {
         crm_warn("%s reply to client %s failed: %s",
                  (sync_reply? "Synchronous" : "Asynchronous"),
                  pcmk__client_name(client_obj), pcmk_rc_str(local_rc));
     }
 }
 
 uint64_t
 get_stonith_flag(const char *name)
 {
     if (pcmk__str_eq(name, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) {
         return st_callback_notify_fence;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_ADD, pcmk__str_casei)) {
         return st_callback_device_add;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_DEL, pcmk__str_casei)) {
         return st_callback_device_del;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY, pcmk__str_casei)) {
         return st_callback_notify_history;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED, pcmk__str_casei)) {
         return st_callback_notify_history_synced;
 
     }
     return st_callback_unknown;
 }
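 
 /* These flag bits are stored in a client's flags field when it subscribes
  * to a notification type, and tested at delivery time, e.g.:
  *
  *   pcmk_is_set(client->flags, get_stonith_flag(type))
  *
  * as done in stonith_notify_client() below.
  */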
 
 static void
 stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *update_msg = user_data;
     pcmk__client_t *client = value;
     const char *type = NULL;
 
     CRM_CHECK(client != NULL, return);
     CRM_CHECK(update_msg != NULL, return);
 
     type = crm_element_value(update_msg, F_SUBTYPE);
     CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
 
     if (client->ipcs == NULL) {
         crm_trace("Skipping client with NULL channel");
         return;
     }
 
     if (pcmk_is_set(client->flags, get_stonith_flag(type))) {
         int rc = pcmk__ipc_send_xml(client, 0, update_msg,
                                     crm_ipc_server_event|crm_ipc_server_error);
 
         if (rc != pcmk_rc_ok) {
             crm_warn("%s notification of client %s failed: %s "
                      CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client),
                      pcmk_rc_str(rc), client->id, rc);
         } else {
             crm_trace("Sent %s notification to client %s",
                       type, pcmk__client_name(client));
         }
     }
 }
 
 void
 do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
 {
     pcmk__client_t *client = NULL;
     xmlNode *notify_data = NULL;
 
     if (!timeout || !call_id || !client_id) {
         return;
     }
 
     client = pcmk__find_client_by_id(client_id);
     if (!client) {
         return;
     }
 
     notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
     crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
 
     crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
 
     pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event);
 
     free_xml(notify_data);
 }
 
 void
 do_stonith_notify(int options, const char *type, int result, xmlNode * data)
 {
     /* TODO: Standardize the contents of data */
     xmlNode *update_msg = create_xml_node(NULL, "notify");
 
     CRM_CHECK(type != NULL,;);
 
     crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(update_msg, F_SUBTYPE, type);
     crm_xml_add(update_msg, F_STONITH_OPERATION, type);
     crm_xml_add_int(update_msg, F_STONITH_RC, result);
 
     if (data != NULL) {
         add_message_xml(update_msg, F_STONITH_CALLDATA, data);
     }
 
     crm_trace("Notifying clients");
     pcmk__foreach_ipc_client(stonith_notify_client, update_msg);
     free_xml(update_msg);
     crm_trace("Notify complete");
 }
 
 static void
 do_stonith_notify_config(int options, const char *op, int rc,
                          const char *desc, int active)
 {
     xmlNode *notify_data = create_xml_node(NULL, op);
 
     CRM_CHECK(notify_data != NULL, return);
 
     crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
     crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active);
 
     do_stonith_notify(options, op, rc, notify_data);
     free_xml(notify_data);
 }
 
 void
 do_stonith_notify_device(int options, const char *op, int rc, const char *desc)
 {
     do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(device_list));
 }
 
 void
 do_stonith_notify_level(int options, const char *op, int rc, const char *desc)
 {
     do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(topology));
 }
 
 static void
 topology_remove_helper(const char *node, int level)
 {
     int rc;
     char *desc = NULL;
     xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
 
     crm_xml_add(data, F_STONITH_ORIGIN, __func__);
     crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
     crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
 
     rc = stonith_level_remove(data, &desc);
     do_stonith_notify_level(0, STONITH_OP_LEVEL_DEL, rc, desc);
 
     free_xml(data);
     free(desc);
 }
 
 static void
 remove_cib_device(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *rsc_id = NULL;
         const char *standard = NULL;
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if(match != NULL) {
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
         }
 
         if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
             continue;
         }
 
         rsc_id = crm_element_value(match, XML_ATTR_ID);
 
         stonith_device_remove(rsc_id, TRUE);
     }
 }
 
 static void
 handle_topology_change(xmlNode *match, bool remove) 
 {
     int rc;
     char *desc = NULL;
 
     CRM_CHECK(match != NULL, return);
     crm_trace("Updating %s", ID(match));
 
     if(remove) {
         int index = 0;
         char *key = stonith_level_key(match, -1);
 
         crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
         topology_remove_helper(key, index);
         free(key);
     }
 
     rc = stonith_level_register(match, &desc);
     do_stonith_notify_level(0, STONITH_OP_LEVEL_ADD, rc, desc);
 
     free(desc);
 }
 
 static void
 remove_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if (match && crm_element_value(match, XML_DIFF_MARKER)) {
             /* Deletion */
             int index = 0;
             char *target = stonith_level_key(match, -1);
 
             crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
             if (target == NULL) {
                 crm_err("Invalid fencing target in element %s", ID(match));
 
             } else if (index <= 0) {
                 crm_err("Invalid level for %s in element %s", target, ID(match));
 
             } else {
                 topology_remove_helper(target, index);
             }
             /* Modifications (rather than deletions) are handled during the 'addition' stage */
         }
     }
 }
 
 static void
 register_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         handle_topology_change(match, TRUE);
     }
 }
 
 /* Fencing
 <diff crm_feature_set="3.0.6">
   <diff-removed>
     <fencing-topology>
       <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
       <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
       <fencing-level devices="disk,network" id="f-p2.1"/>
     </fencing-topology>
   </diff-removed>
   <diff-added>
     <fencing-topology>
       <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
       <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
       <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
     </fencing-topology>
   </diff-added>
 </diff>
 */
 
 static void
 fencing_topology_init(void)
 {
     xmlXPathObjectPtr xpathObj = NULL;
     const char *xpath = "//" XML_TAG_FENCING_LEVEL;
 
     crm_trace("Full topology refresh");
     free_topology_list();
     init_topology_list();
 
     /* Grab everything */
     xpathObj = xpath_search(local_cib, xpath);
     register_fencing_topology(xpathObj);
 
     freeXpathObject(xpathObj);
 }
 
 #define rsc_name(x) ((x)->clone_name? (x)->clone_name : (x)->id)
 
 /*!
  * \internal
  * \brief Check whether our uname is in a resource's allowed node list
  *
  * \param[in] rsc  Resource to check
  *
  * \return Pointer to node object if found, NULL otherwise
  */
 static pe_node_t *
 our_node_allowed_for(pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (rsc && stonith_our_uname) {
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
                 break;
             }
             node = NULL;
         }
     }
     return node;
 }
 
 /*!
  * \internal
  * \brief If a resource or any of its children are STONITH devices, update their
  *        definitions given a cluster working set.
  *
  * \param[in] rsc       Resource to check
  * \param[in] data_set  Cluster working set with device information
  */
 static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
     const char *value = NULL;
     const char *rclass = NULL;
     pe_node_t *parent = NULL;
     gboolean remove = TRUE;
 
     /* If this is a complex resource, check children rather than this resource itself.
      * TODO: Mark each installed device and remove if untouched when this process finishes.
      */
     if(rsc->children) {
         GListPtr gIter = NULL;
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             cib_device_update(gIter->data, data_set);
             if(pe_rsc_is_clone(rsc)) {
                 crm_trace("Only processing one copy of the clone %s", rsc->id);
                 break;
             }
         }
         return;
     }
 
     /* We only care about STONITH resources. */
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         return;
     }
 
     /* If this STONITH resource is disabled, just remove it. */
     if (pe__resource_is_disabled(rsc)) {
         crm_info("Device %s has been disabled", rsc->id);
         goto update_done;
     }
 
     /* Check whether our node is allowed for this resource (and its parent if in a group) */
     node = our_node_allowed_for(rsc);
     if (rsc->parent && (rsc->parent->variant == pe_group)) {
         parent = our_node_allowed_for(rsc->parent);
     }
 
     if(node == NULL) {
         /* Our node is disallowed, so remove the device */
         GHashTableIter iter;
 
         crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             crm_trace("Available: %s = %d", node->details->uname, node->weight);
         }
 
         goto update_done;
 
     } else if(node->weight < 0 || (parent && parent->weight < 0)) {
         /* Our node (or its group) is disallowed by score, so remove the device */
         char *score = score2char((node->weight < 0) ? node->weight : parent->weight);
 
         crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score);
         free(score);
 
         goto update_done;
 
     } else {
         /* Our node is allowed, so update the device information */
         int rc;
         xmlNode *data;
+        GHashTable *rsc_params = NULL;
         GHashTableIter gIter;
         stonith_key_value_t *params = NULL;
 
         const char *name = NULL;
         const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
         const char *rsc_provides = NULL;
 
         crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
-        get_rsc_attributes(rsc->parameters, rsc, node, data_set);
+        rsc_params = pe_rsc_params(rsc, node, data_set);
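+        /* rsc_params is cached on the resource by pe_rsc_params(), so it is
+         * only read here and must not be freed by this function.
+         */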
         get_meta_attributes(rsc->meta, rsc, node, data_set);
 
         rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
 
-        g_hash_table_iter_init(&gIter, rsc->parameters);
+        g_hash_table_iter_init(&gIter, rsc_params);
         while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
             if (!name || !value) {
                 continue;
             }
             params = stonith_key_value_add(params, name, value);
             crm_trace(" %s=%s", name, value);
         }
 
         remove = FALSE;
         data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
                                               agent, params, rsc_provides);
         stonith_key_value_freeall(params, 1, 1);
         rc = stonith_device_register(data, NULL, TRUE);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(data);
     }
 
 update_done:
 
     if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) {
         stonith_device_remove(rsc_name(rsc), TRUE);
     }
 }
 
 /*!
  * \internal
  * \brief Update all STONITH device definitions based on current CIB
  */
 static void
 cib_devices_update(void)
 {
     GListPtr gIter = NULL;
 
     crm_info("Updating devices to version %s.%s.%s",
              crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
              crm_element_value(local_cib, XML_ATTR_GENERATION),
              crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
 
     CRM_ASSERT(fenced_data_set != NULL);
     fenced_data_set->input = local_cib;
     fenced_data_set->now = crm_time_new(NULL);
     fenced_data_set->localhost = stonith_our_uname;
     pe__set_working_set_flags(fenced_data_set, pe_flag_quick_location);
 
     cluster_status(fenced_data_set);
     pcmk__schedule_actions(fenced_data_set, NULL, NULL);
 
     for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) {
         cib_device_update(gIter->data, fenced_data_set);
     }
     fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
     pe_reset_working_set(fenced_data_set);
 }
 
 static void
 update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
 {
     xmlNode *change = NULL;
     char *reason = NULL;
     bool needs_update = FALSE;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     for (change = pcmk__xml_first_child(patchset); change != NULL;
          change = pcmk__xml_next(change)) {
         const char *op = crm_element_value(change, XML_DIFF_OP);
         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
         const char *shortpath = NULL;
 
         if ((op == NULL) ||
             (strcmp(op, "move") == 0) ||
             strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
             continue;
         } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
             const char *rsc_id = NULL;
             char *search = NULL;
             char *mutable = NULL;
 
             if (strstr(xpath, XML_TAG_ATTR_SETS) ||
                 strstr(xpath, XML_TAG_META_SETS)) {
                 needs_update = TRUE;
                 reason = strdup("(meta) attribute deleted from resource");
                 break;
             } 
             mutable = strdup(xpath);
             rsc_id = strstr(mutable, "primitive[@id=\'");
             if (rsc_id != NULL) {
                 rsc_id += strlen("primitive[@id=\'");
                 search = strchr(rsc_id, '\'');
             }
             if (search != NULL) {
                 *search = 0;
                 stonith_device_remove(rsc_id, TRUE);
             } else {
                 crm_warn("Ignoring malformed CIB update (resource deletion)");
             }
             free(mutable);
 
         } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
                    strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
                    strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
             shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
             reason = crm_strdup_printf("%s %s", op, shortpath+1);
             needs_update = TRUE;
             break;
         }
     }
 
     if(needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     } else {
         crm_trace("No updates for device list found in CIB");
     }
     free(reason);
 }
 
 static void
 update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
 {
     const char *reason = "none";
     gboolean needs_update = FALSE;
     xmlXPathObjectPtr xpath_obj = NULL;
 
     /* process new constraints */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         /* Safest and simplest to always recompute */
         needs_update = TRUE;
         reason = "new location constraint";
 
         for (lpc = 0; lpc < max; lpc++) {
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             crm_log_xml_trace(match, "new constraint");
         }
     }
     freeXpathObject(xpath_obj);
 
     /* process deletions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         remove_cib_device(xpath_obj);
     }
     freeXpathObject(xpath_obj);
 
     /* process additions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         for (lpc = 0; lpc < max; lpc++) {
             const char *rsc_id = NULL;
             const char *standard = NULL;
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             rsc_id = crm_element_value(match, XML_ATTR_ID);
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
 
             if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
                 continue;
             }
 
             crm_trace("Fencing resource %s was added or modified", rsc_id);
             reason = "new resource";
             needs_update = TRUE;
         }
     }
     freeXpathObject(xpath_obj);
 
     if(needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     }
 }
 
 static void
 update_cib_stonith_devices(const char *event, xmlNode * msg)
 {
     int format = 1;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
     switch(format) {
         case 1:
             update_cib_stonith_devices_v1(event, msg);
             break;
         case 2:
             update_cib_stonith_devices_v2(event, msg);
             break;
         default:
             crm_warn("Unknown patch format: %d", format);
     }
 }
 
 /* Needs to hold node name + attribute name + attribute value + 75 */
 #define XPATH_MAX 512
 
 /*!
  * \internal
  * \brief Check whether a node has a specific attribute name/value
  *
  * \param[in] node    Name of node to check
  * \param[in] name    Name of an attribute to look for
  * \param[in] value   The value the named attribute needs to be set to in order to be considered a match
  *
  * \return TRUE if the locally cached CIB has the specified node attribute
  */
 gboolean
 node_has_attr(const char *node, const char *name, const char *value)
 {
     char xpath[XPATH_MAX];
     xmlNode *match;
     int n;
 
     CRM_CHECK(local_cib != NULL, return FALSE);
 
     /* Search for the node's attributes in the CIB. While the schema allows
      * multiple sets of instance attributes, and allows instance attributes to
      * use id-ref to reference values elsewhere, that is intended for resources,
      * so we ignore that here.
      */
     n = snprintf(xpath, XPATH_MAX, "//" XML_CIB_TAG_NODES
                  "/" XML_CIB_TAG_NODE "[@uname='%s']/" XML_TAG_ATTR_SETS
                  "/" XML_CIB_TAG_NVPAIR "[@name='%s' and @value='%s']",
                  node, name, value);
     match = get_xpath_object(xpath, local_cib, LOG_NEVER);
 
     CRM_CHECK(n < XPATH_MAX, return FALSE);
     return (match != NULL);
 }
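 
 /* For example (node and attribute names are hypothetical), a call such as
  * node_has_attr("node1", "standby", "on") searches the local CIB copy for:
  *
  *   //nodes/node[@uname='node1']/instance_attributes
  *       /nvpair[@name='standby' and @value='on']
  */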
 
 static void
 update_fencing_topology(const char *event, xmlNode * msg)
 {
     int format = 1;
     const char *xpath;
     xmlXPathObjectPtr xpathObj = NULL;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
 
     if(format == 1) {
         /* Process deletions (only) */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         remove_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
         /* Process additions and changes */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         register_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
     } else if(format == 2) {
         xmlNode *change = NULL;
         int add[] = { 0, 0, 0 };
         int del[] = { 0, 0, 0 };
 
         xml_patch_versions(patchset, add, del);
 
         for (change = pcmk__xml_first_child(patchset); change != NULL;
              change = pcmk__xml_next(change)) {
             const char *op = crm_element_value(change, XML_DIFF_OP);
             const char *xpath = crm_element_value(change, XML_DIFF_PATH);
 
             if(op == NULL) {
                 continue;
 
             } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
                 /* Change to a specific entry */
 
                 crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
                 if(strcmp(op, "move") == 0) {
                     continue;
 
                 } else if(strcmp(op, "create") == 0) {
                     handle_topology_change(change->children, FALSE);
 
                 } else if(strcmp(op, "modify") == 0) {
                     xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
 
                     if(match) {
                         handle_topology_change(match->children, TRUE);
                     }
 
                 } else if(strcmp(op, "delete") == 0) {
                     /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
                     crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
                 /* Change to the topology in general */
                 crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
                          op, add[0], add[1], add[2], xpath);
                 fencing_topology_init();
                 return;
 
             } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
                 /* Changes to the whole config section, possibly including the topology as a whole */
                 if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
                     crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
                               op, add[0], add[1], add[2], xpath);
 
                 } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
                     crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else {
                 crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
                           op, add[0], add[1], add[2], xpath);
             }
         }
 
     } else {
         crm_warn("Unknown patch format: %d", format);
     }
 }

 static bool have_cib_devices = FALSE;
 
 static void
 update_cib_cache_cb(const char *event, xmlNode * msg)
 {
     int rc = pcmk_ok;
     xmlNode *stonith_enabled_xml = NULL;
     xmlNode *stonith_watchdog_xml = NULL;
     const char *stonith_enabled_s = NULL;
     static gboolean stonith_enabled_saved = TRUE;
 
     if(!have_cib_devices) {
         crm_trace("Skipping updates until we get a full dump");
         return;
 
     } else if(msg == NULL) {
         crm_trace("Missing %s update", event);
         return;
     }
 
     /* Maintain a local copy of the CIB so that we have full access
      * to device definitions, location constraints, and node attributes
      */
     if (local_cib != NULL) {
         int rc = pcmk_ok;
         xmlNode *patchset = NULL;
 
         crm_element_value_int(msg, F_CIB_RC, &rc);
         if (rc != pcmk_ok) {
             return;
         }
 
         patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
         xml_log_patchset(LOG_TRACE, "Config update", patchset);
         rc = xml_apply_patchset(local_cib, patchset, TRUE);
         switch (rc) {
             case pcmk_ok:
             case -pcmk_err_old_data:
                 break;
             case -pcmk_err_diff_resync:
             case -pcmk_err_diff_failed:
                 crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
                 break;
             default:
                 crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
         }
     }
 
     if (local_cib == NULL) {
         crm_trace("Re-requesting full CIB");
         rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
         if(rc != pcmk_ok) {
             crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
             return;
         }
         CRM_ASSERT(local_cib != NULL);
         stonith_enabled_saved = FALSE; /* Trigger a full refresh below */
     }
 
     pcmk__refresh_node_caches_from_cib(local_cib);
 
     stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']",
                                            local_cib, LOG_NEVER);
     if (stonith_enabled_xml) {
         stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE);
     }
 
     if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) {
         long timeout_ms = 0;
         const char *value = NULL;
 
         stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']",
                                                 local_cib, LOG_NEVER);
         if (stonith_watchdog_xml) {
             value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
         }
 
         if(value) {
             timeout_ms = crm_get_msec(value);
         }
         if (timeout_ms < 0) {
             timeout_ms = pcmk__auto_watchdog_timeout();
         }
 
         if(timeout_ms != stonith_watchdog_timeout_ms) {
             crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000);
             stonith_watchdog_timeout_ms = timeout_ms;
         }
 
     } else {
         stonith_watchdog_timeout_ms = 0;
     }
 
     if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) {
         crm_trace("Ignoring CIB updates while fencing is disabled");
         stonith_enabled_saved = FALSE;
         return;
 
     } else if (stonith_enabled_saved == FALSE) {
         crm_info("Updating fencing device and topology lists "
                  "now that fencing is enabled");
         stonith_enabled_saved = TRUE;
         fencing_topology_init();
         cib_devices_update();
 
     } else {
         update_fencing_topology(event, msg);
         update_cib_stonith_devices(event, msg);
     }
 }
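 
 /* Summary of the caching strategy above: incremental CIB diffs are applied
  * to the local copy with xml_apply_patchset(); if a patch cannot be applied
  * (diff resync or failure), the copy is discarded and re-fetched in full
  * with a synchronous query, which also forces a full refresh of the device
  * and topology lists.
  */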
 
 static void
 init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     crm_info("Updating device list from CIB");
     have_cib_devices = TRUE;
     local_cib = copy_xml(output);
 
     pcmk__refresh_node_caches_from_cib(local_cib);
 
     fencing_topology_init();
     cib_devices_update();
 }
 
 static void
 stonith_shutdown(int nsig)
 {
     crm_info("Terminating with %d clients", pcmk__ipc_client_count());
     stonith_shutdown_flag = TRUE;
     if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
         g_main_loop_quit(mainloop);
     } else {
         stonith_cleanup();
         crm_exit(CRM_EX_OK);
     }
 }
 
 static void
 cib_connection_destroy(gpointer user_data)
 {
     if (stonith_shutdown_flag) {
         crm_info("Connection to the CIB manager closed");
         return;
     } else {
         crm_crit("Lost connection to the CIB manager, shutting down");
     }
     if (cib_api) {
         cib_api->cmds->signoff(cib_api);
     }
     stonith_shutdown(0);
 }
 
 static void
 stonith_cleanup(void)
 {
     if (cib_api) {
         cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb);
         cib_api->cmds->signoff(cib_api);
     }
 
     if (ipcs) {
         qb_ipcs_destroy(ipcs);
     }
 
     crm_peer_destroy();
     pcmk__client_cleanup();
     free_stonith_remote_op_list();
     free_topology_list();
     free_device_list();
     free_metadata_cache();
 
     free(stonith_our_uname);
     stonith_our_uname = NULL;
 
     free_xml(local_cib);
     local_cib = NULL;
 }
 
 static pcmk__cli_option_t long_options[] = {
     // long option, argument type, storage, short option, description, flags
     {
         "stand-alone", no_argument, 0, 's',
         NULL, pcmk__option_default
     },
     {
         "stand-alone-w-cpg", no_argument, 0, 'c',
         NULL, pcmk__option_default
     },
     {
         "logfile", required_argument, 0, 'l',
         NULL, pcmk__option_default
     },
     {
         "verbose", no_argument, 0, 'V',
         NULL, pcmk__option_default
     },
     {
         "version", no_argument, 0, '$',
         NULL, pcmk__option_default
     },
     {
         "help", no_argument, 0, '?',
         NULL, pcmk__option_default
     },
     { 0, 0, 0, 0 }
 };
 
 static void
 setup_cib(void)
 {
     int rc, retries = 0;
 
     cib_api = cib_new();
     if (cib_api == NULL) {
         crm_err("No connection to the CIB manager");
         return;
     }
 
     do {
         sleep(retries);
         rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
     } while (rc == -ENOTCONN && ++retries < 5);
 
     if (rc != pcmk_ok) {
         crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
 
     } else if (pcmk_ok !=
                cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
         crm_err("Could not set CIB notification callback");
 
     } else {
         rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
         cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
                                          init_cib_cache_cb);
         cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
         crm_info("Watching for fencing topology changes");
     }
 }
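 
 /* The signon loop above retries up to five times with a simple linear
  * backoff: sleep(retries) waits 0s, 1s, 2s, 3s and 4s before successive
  * attempts, and only -ENOTCONN is treated as retryable.
  */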
 
 struct qb_ipcs_service_handlers ipc_callbacks = {
     .connection_accept = st_ipc_accept,
     .connection_created = NULL,
     .msg_process = st_ipc_dispatch,
     .connection_closed = st_ipc_closed,
     .connection_destroyed = st_ipc_destroy
 };
 
 /*!
  * \internal
  * \brief Callback for peer status changes
  *
  * \param[in] type  What changed
  * \param[in] node  What peer had the change
  * \param[in] data  Previous value of what changed
  */
 static void
 st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     if ((type != crm_status_processes)
         && !pcmk_is_set(node->flags, crm_remote_node)) {
         /*
          * This is a hack until we can send to a nodeid and/or we fix node name lookups
          * These messages are ignored in stonith_peer_callback()
          */
         xmlNode *query = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
         crm_xml_add(query, F_TYPE, T_STONITH_NG);
         crm_xml_add(query, F_STONITH_OPERATION, "poke");
 
         crm_debug("Broadcasting our uname because of node %u", node->id);
         send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
         free_xml(query);
     }
 }
 
 int
 main(int argc, char **argv)
 {
     int flag;
     int lpc = 0;
     int argerr = 0;
     int option_index = 0;
     crm_cluster_t cluster;
     const char *actions[] = { "reboot", "off", "on", "list", "monitor", "status" };
     crm_ipc_t *old_instance = NULL;
 
     crm_log_preinit(NULL, argc, argv);
     pcmk__set_cli_options(NULL, "[options]", long_options,
                           "daemon for executing fencing devices in a "
                           "Pacemaker cluster");
 
     while (1) {
         flag = pcmk__next_cli_option(argc, argv, &option_index, NULL);
         if (flag == -1) {
             break;
         }
 
         switch (flag) {
             case 'V':
                 crm_bump_log_level(argc, argv);
                 break;
             case 'l':
                 crm_add_logfile(optarg);
                 break;
             case 's':
                 stand_alone = TRUE;
                 break;
             case 'c':
                 stand_alone = FALSE;
                 no_cib_connect = TRUE;
                 break;
             case '$':
             case '?':
                 pcmk__cli_help(flag, CRM_EX_OK);
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     if (argc - optind == 1 && pcmk__str_eq("metadata", argv[optind], pcmk__str_casei)) {
         printf("<?xml version=\"1.0\"?><!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n");
         printf("<resource-agent name=\"pacemaker-fenced\">\n");
         printf(" <version>1.0</version>\n");
         printf(" <longdesc lang=\"en\">Instance attributes available for all \"stonith\"-class resources"
                                        " and used by Pacemaker's fence daemon, formerly known as stonithd</longdesc>\n");
         printf(" <shortdesc lang=\"en\">Instance attributes available for all \"stonith\"-class resources</shortdesc>\n");
         printf(" <parameters>\n");
 
 #if 0
         // priority is not implemented yet
         printf("  <parameter name=\"priority\" unique=\"0\">\n");
         printf("    <shortdesc lang=\"en\">Devices that are not in a topology "
                "are tried in order of highest to lowest integer priority</shortdesc>\n");
         printf("    <content type=\"integer\" default=\"0\"/>\n");
         printf("  </parameter>\n");
 #endif
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_HOST_ARGUMENT);
         printf
             ("    <shortdesc lang=\"en\">Advanced use only: An alternate parameter to supply instead of 'port'</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Some devices do not support the standard 'port' parameter or may provide additional ones.\n"
              "Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n"
              "A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n"
              "     </longdesc>\n");
         printf("    <content type=\"string\" default=\"port\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_HOST_MAP);
         printf
             ("    <shortdesc lang=\"en\">A mapping of host names to ports numbers for devices that do not support host names.</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2</longdesc>\n");
         printf("    <content type=\"string\" default=\"\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_HOST_LIST);
         printf("    <shortdesc lang=\"en\">A list of machines controlled by "
                "this device (Optional unless %s=static-list).</shortdesc>\n",
                PCMK_STONITH_HOST_CHECK);
         printf("    <content type=\"string\" default=\"\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_HOST_CHECK);
         printf
             ("    <shortdesc lang=\"en\">How to determine which machines are controlled by the device.</shortdesc>\n");
         printf("    <longdesc lang=\"en\">Allowed values: dynamic-list "
                "(query the device via the 'list' command), static-list "
                "(check the " PCMK_STONITH_HOST_LIST " attribute), status "
                "(query the device via the 'status' command), none (assume "
                "every device can fence every machine)</longdesc>\n");
         printf("    <content type=\"string\" default=\"dynamic-list\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_DELAY_MAX);
         printf("    <shortdesc lang=\"en\">Enable a delay of no more than the "
                "time specified before executing fencing actions. Pacemaker "
                "derives the overall delay by taking the value of "
                PCMK_STONITH_DELAY_BASE " and adding a random delay value such "
                "that the sum is kept below this maximum.</shortdesc>\n");
         printf("    <longdesc lang=\"en\">This prevents double fencing when "
                "using slow devices such as sbd.\nUse this to enable a random "
                "delay for fencing actions.\nThe overall delay is derived from "
                "this random delay value adding a static delay so that the sum "
                "is kept below the maximum delay.</longdesc>\n");
         printf("    <content type=\"time\" default=\"0s\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_DELAY_BASE);
         printf("    <shortdesc lang=\"en\">Enable a base delay for "
                "fencing actions and specify base delay value.</shortdesc>\n");
         printf("    <longdesc lang=\"en\">This prevents double fencing when "
                "different delays are configured on the nodes.\nUse this to "
                "enable a static delay for fencing actions.\nThe overall delay "
                "is derived from a random delay value adding this static delay "
                "so that the sum is kept below the maximum delay.</longdesc>\n");
         printf("    <content type=\"time\" default=\"0s\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n",
                PCMK_STONITH_ACTION_LIMIT);
         printf
             ("    <shortdesc lang=\"en\">The maximum number of actions can be performed in parallel on this device</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Cluster property concurrent-fencing=true needs to be configured first.\n"
              "Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.</longdesc>\n");
         printf("    <content type=\"integer\" default=\"1\"/>\n");
         printf("  </parameter>\n");
 
 
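+        /* Generate the pcmk_<action>_action, pcmk_<action>_timeout, and
+         * pcmk_<action>_retries parameters for each supported action
+         */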
         for (lpc = 0; lpc < DIMOF(actions); lpc++) {
             printf("  <parameter name=\"pcmk_%s_action\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: An alternate command to run instead of '%s'</shortdesc>\n",
                  actions[lpc]);
             printf
                 ("    <longdesc lang=\"en\">Some devices do not support the standard commands or may provide additional ones.\n"
                  "Use this to specify an alternate, device-specific, command that implements the '%s' action.</longdesc>\n",
                  actions[lpc]);
             printf("    <content type=\"string\" default=\"%s\"/>\n", actions[lpc]);
             printf("  </parameter>\n");
 
             printf("  <parameter name=\"pcmk_%s_timeout\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: Specify an alternate timeout to use for %s actions instead of stonith-timeout</shortdesc>\n",
                  actions[lpc]);
             printf
                 ("    <longdesc lang=\"en\">Some devices need much more/less time to complete than normal.\n"
                  "Use this to specify an alternate, device-specific, timeout for '%s' actions.</longdesc>\n",
                  actions[lpc]);
             printf("    <content type=\"time\" default=\"60s\"/>\n");
             printf("  </parameter>\n");
 
             printf("  <parameter name=\"pcmk_%s_retries\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: The maximum number of times to retry the '%s' command within the timeout period</shortdesc>\n",
                  actions[lpc]);
             printf("    <longdesc lang=\"en\">Some devices do not support multiple connections."
                    " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
                    " Use this option to alter the number of times Pacemaker retries '%s' actions before giving up."
                    "</longdesc>\n", actions[lpc]);
             printf("    <content type=\"integer\" default=\"2\"/>\n");
             printf("  </parameter>\n");
         }
 
         printf(" </parameters>\n");
         printf("</resource-agent>\n");
         return CRM_EX_OK;
     }
 
     if (optind != argc) {
         ++argerr;
     }
 
     if (argerr) {
         pcmk__cli_help('?', CRM_EX_USAGE);
     }
 
     crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
 
     crm_notice("Starting Pacemaker fencer");
 
     old_instance = crm_ipc_new("stonith-ng", 0);
     if (crm_ipc_connect(old_instance)) {
         /* IPC end-point already up */
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_err("pacemaker-fenced is already active, aborting startup");
         crm_exit(CRM_EX_OK);
     } else {
         /* not up or not authentic, we'll proceed either way */
         crm_ipc_destroy(old_instance);
         old_instance = NULL;
     }
 
     mainloop_add_signal(SIGTERM, stonith_shutdown);
 
     crm_peer_init();
 
     fenced_data_set = pe_new_working_set();
     CRM_ASSERT(fenced_data_set != NULL);
     pe__set_working_set_flags(fenced_data_set,
                               pe_flag_no_counts|pe_flag_no_compat);
 
     if (stand_alone == FALSE) {
 
         if (is_corosync_cluster()) {
 #if SUPPORT_COROSYNC
             cluster.destroy = stonith_peer_cs_destroy;
             cluster.cpg.cpg_deliver_fn = stonith_peer_ais_callback;
             cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership;
 #endif
         }
 
         crm_set_status_callback(&st_peer_update_callback);
 
         if (crm_cluster_connect(&cluster) == FALSE) {
             crm_crit("Cannot sign in to the cluster... terminating");
             crm_exit(CRM_EX_FATAL);
         }
         stonith_our_uname = cluster.uname;
         stonith_our_uuid = cluster.uuid;
 
         if (no_cib_connect == FALSE) {
             setup_cib();
         }
 
     } else {
         stonith_our_uname = strdup("localhost");
     }
 
     init_device_list();
     init_topology_list();
 
     if (stonith_watchdog_timeout_ms > 0) {
         int rc;
         xmlNode *xml;
         stonith_key_value_t *params = NULL;
 
         params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST,
                                        stonith_our_uname);
 
         xml = create_device_registration_xml("watchdog", st_namespace_internal,
                                              STONITH_WATCHDOG_AGENT, params,
                                              NULL);
         stonith_key_value_freeall(params, 1, 1);
         rc = stonith_device_register(xml, NULL, FALSE);
         free_xml(xml);
         if (rc != pcmk_ok) {
             crm_crit("Cannot register watchdog pseudo fence agent");
             crm_exit(CRM_EX_FATAL);
         }
     }
 
     pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks);
 
     /* Create the mainloop and run it... */
     mainloop = g_main_loop_new(NULL, FALSE);
     crm_notice("Pacemaker fencer successfully started and accepting connections");
     g_main_loop_run(mainloop);
 
     stonith_cleanup();
     pe_free_working_set(fenced_data_set);
     crm_exit(CRM_EX_OK);
 }
diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h
index effa44fbdb..1d010f4fd5 100644
--- a/include/crm/pengine/complex.h
+++ b/include/crm/pengine/complex.h
@@ -1,39 +1,45 @@
 /*
- * Copyright 2004-2019 the Pacemaker project contributors
+ * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PENGINE_COMPLEX__H
 #  define PENGINE_COMPLEX__H
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #include <glib.h>                   // gboolean, GHashTable
 #include <libxml/tree.h>            // xmlNode
 #include <crm/pengine/pe_types.h>   // pe_node_t, pe_resource_t, etc.
 
 extern resource_object_functions_t resource_class_functions[];
+
+GHashTable *pe_rsc_params(pe_resource_t *rsc, pe_node_t *node,
+                          pe_working_set_t *data_set);
 void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc,
                          pe_node_t *node, pe_working_set_t *data_set);
 void get_rsc_attributes(GHashTable *meta_hash, pe_resource_t *rsc,
                         pe_node_t *node, pe_working_set_t *data_set);
 
 #if ENABLE_VERSIONED_ATTRS
 void pe_get_versioned_attributes(xmlNode *meta_hash, pe_resource_t *rsc,
                                  pe_node_t *node, pe_working_set_t *data_set);
 #endif
 
 gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc);
 pe_resource_t *uber_parent(pe_resource_t *rsc);
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 1e5aee12c9..9f4e28a1e4 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,598 +1,603 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 #  include <string.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/output_internal.h>
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) do {           \
         was_processing_error = TRUE;    \
         pcmk__config_err(fmt);          \
     } while (0)
 
 #  define pe_warn(fmt...) do {          \
         was_processing_warning = TRUE;  \
         pcmk__config_warn(fmt);         \
     } while (0)
 
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 
 #define pe__set_working_set_flags(working_set, flags_to_set) do {           \
         (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_set), #flags_to_set);           \
     } while (0)
 
 #define pe__clear_working_set_flags(working_set, flags_to_clear) do {       \
         (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,     \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_clear), #flags_to_clear);       \
     } while (0)
 
 #define pe__set_resource_flags(resource, flags_to_set) do {                 \
         (resource)->flags = pcmk__set_flags_as(__func__, __LINE__,          \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_set), #flags_to_set);                                 \
     } while (0)
 
 #define pe__clear_resource_flags(resource, flags_to_clear) do {             \
         (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__,        \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_clear), #flags_to_clear);                             \
     } while (0)
 
 #define pe__set_action_flags(action, flags_to_set) do {                     \
         (action)->flags = pcmk__set_flags_as(__func__, __LINE__,            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags(action, flags_to_clear) do {                 \
         (action)->flags = pcmk__clear_flags_as(__func__, __LINE__,          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
         action_flags = pcmk__set_flags_as(__func__, __LINE__,               \
                                           LOG_TRACE, "Action", action_name, \
                                           (action_flags),                   \
                                           (flags_to_set), #flags_to_set);   \
     } while (0)
 
 #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
         action_flags = pcmk__clear_flags_as(__func__, __LINE__,             \
                                             LOG_TRACE,                      \
                                             "Action", action_name,          \
                                             (action_flags),                 \
                                             (flags_to_clear),               \
                                             #flags_to_clear);               \
     } while (0)
 
 #define pe__set_action_flags_as(function, line, action, flags_to_set) do {  \
         (action)->flags = pcmk__set_flags_as((function), (line),            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
         (action)->flags = pcmk__clear_flags_as((function), (line),          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_order_flags(order_flags, flags_to_set) do {                 \
         order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                          "Ordering", "constraint",          \
                                          order_flags, (flags_to_set),       \
                                          #flags_to_set);                    \
     } while (0)
 
 #define pe__clear_order_flags(order_flags, flags_to_clear) do {               \
         order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Ordering", "constraint",          \
                                            order_flags, (flags_to_clear),     \
                                            #flags_to_clear);                  \
     } while (0)
 
 #define pe__set_graph_flags(graph_flags, gr_action, flags_to_set) do {      \
         graph_flags = pcmk__set_flags_as(__func__, __LINE__,                \
                                          LOG_TRACE, "Graph",                \
                                          (gr_action)->uuid, graph_flags,    \
                                          (flags_to_set), #flags_to_set);    \
     } while (0)
 
 #define pe__clear_graph_flags(graph_flags, gr_action, flags_to_clear) do {     \
         graph_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                            LOG_TRACE, "Graph",                 \
                                            (gr_action)->uuid, graph_flags,     \
                                            (flags_to_clear), #flags_to_clear); \
     } while (0)
 
 // Some warnings we don't want to print every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = 0x0001,
     pe_wo_restart_type  = 0x0002,
     pe_wo_role_after    = 0x0004,
     pe_wo_poweroff      = 0x0008,
     pe_wo_require_all   = 0x0010,
     pe_wo_order_score   = 0x0020,
     pe_wo_neg_threshold = 0x0040,
 };
 
 extern uint32_t pe_wo;
 
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (!pcmk_is_set(pe_wo, pe_wo_bit)) {  \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,       \
                                       "Warn-once", "logging", pe_wo,        \
                                       (pe_wo_bit), #pe_wo_bit);             \
         }                                       \
     } while (0);
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GListPtr node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
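+    // "lh" fields describe the "first" action/resource in the ordering,
+    // and "rh" fields the "then" (see enum pe_ordering in pe_types.h)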
     int id;
     enum pe_ordering type;
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 typedef struct notify_data_s {
     GSList *keys;               // Environment variable name/value pairs
 
     const char *action;
 
     pe_action_t *pre;
     pe_action_t *post;
     pe_action_t *pre_done;
     pe_action_t *post_done;
 
     GListPtr active;            /* notify_entry_t*  */
     GListPtr inactive;          /* notify_entry_t*  */
     GListPtr start;             /* notify_entry_t*  */
     GListPtr stop;              /* notify_entry_t*  */
     GListPtr demote;            /* notify_entry_t*  */
     GListPtr promote;           /* notify_entry_t*  */
     GListPtr master;            /* notify_entry_t*  */
     GListPtr slave;             /* notify_entry_t*  */
     GHashTable *allowed_nodes;
 
 } notify_data_t;
 
 bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node);
 
 int pe__add_scores(int score1, int score2);
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(void);
 void verify_pe_options(GHashTable * options);
 
 void common_update_score(pe_resource_t * rsc, const char *id, int score);
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
                                    long options, const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                              const char *tag_name, size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 static inline const char *
 pe__rsc_bool_str(pe_resource_t *rsc, uint64_t rsc_flag)
 {
     return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
 }
 
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_html(pcmk__output_t *out, va_list args);
 int pe__clone_text(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_html(pcmk__output_t *out, va_list args);
 int pe__group_text(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 
 /* Exported for crm_mon to reference */
 int pe__ban_text(pcmk__output_t *out, va_list args);
 int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
 int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
 int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
 int pe__cluster_options_text(pcmk__output_t *out, va_list args);
 int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
 int pe__cluster_summary(pcmk__output_t *out, va_list args);
 int pe__cluster_times_text(pcmk__output_t *out, va_list args);
 int pe__failed_action_text(pcmk__output_t *out, va_list args);
 int pe__node_attribute_text(pcmk__output_t *out, va_list args);
 int pe__node_list_text(pcmk__output_t *out, va_list args);
 int pe__op_history_text(pcmk__output_t *out, va_list args);
 int pe__resource_history_text(pcmk__output_t *out, va_list args);
 int pe__ticket_text(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent,
                        pe_working_set_t * data_set);
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = 0x00,
     pe_fc_effective = 0x01, // don't count expired failures
     pe_fc_fillers   = 0x02, // if container, include filler failures in count
 };
 
 int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure,
                      uint32_t flags, xmlNode *xml_op,
                      pe_working_set_t *data_set);
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
                               unsigned int *count_all,
                               unsigned int *count_clean);
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return pe__find_active_on(rsc, NULL, NULL);
 }
 
 
 /* Binary-like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(GList *list);
 
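+// NULL-safe wrapper for g_hash_table_lookup()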
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 /* Printing functions for debug */
 extern void print_node(const char *pre_text, pe_node_t * node, gboolean details);
 extern void print_str_str(gpointer key, gpointer value, gpointer user_data);
 extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out);
 
 extern void dump_node_capacity(int level, const char *comment, pe_node_t * node);
 extern void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node);
 
 void pe__show_node_weights_as(const char *file, const char *function,
                               int line, bool to_log, pe_resource_t *rsc,
                               const char *comment, GHashTable *nodes);
 
 #define pe__show_node_weights(level, rsc, text, nodes)              \
         pe__show_node_weights_as(__FILE__, __func__, __LINE__,      \
                                  (level), (rsc), (text), (nodes))
 
 /* Sorting functions */
 extern gint sort_rsc_priority(gconstpointer a, gconstpointer b);
 extern gint sort_rsc_index(gconstpointer a, gconstpointer b);
 
 extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key);
 
 extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node,
                                   gboolean optional, gboolean foo, pe_working_set_t * data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, data_set);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, data_set);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, data_set);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, data_set)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, data_set)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, data_set)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, data_set)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, data_set)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, data_set)
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 extern pe_action_t *find_first_action(GListPtr input, const char *uuid, const char *task,
                                       pe_node_t * on_node);
 extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name,
                                           gboolean allow_non_atomic);
 
 extern GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 extern GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
                               pe_working_set_t * data_set);
 
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role);
 
 extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id,
                                           pe_working_set_t * data_set);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return FALSE;
 }
 
 int pe__target_rc_from_xml(xmlNode *xml_op);
 
 gint sort_node_uname(gconstpointer a, gconstpointer b);
 bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed. */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
                                          guint *interval_ms, pe_node_t *node,
                                          xmlNode *xml_op, GHashTable *overrides,
                                          bool calc_secure,
                                          pe_working_set_t *data_set);
 
 void pe__free_digests(gpointer ptr);
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
                                          pe_working_set_t * data_set);
 
 pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set);
 void trigger_unfencing(
     pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set);
 
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
 
 #define pe_action_required(action, reason, text)    \
     pe_action_set_flag_reason(__func__, __LINE__, action, reason, text, \
                               pe_action_optional, FALSE)
 #define pe_action_implies(action, reason, flag)     \
     pe_action_set_flag_reason(__func__, __LINE__, action, reason, NULL, \
                               flag, FALSE)
 
 void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
-bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
-const char *pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml,
-                                       const char *field);
+bool pe__bundle_needs_remote_name(pe_resource_t *rsc,
+                                  pe_working_set_t *data_set);
+const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
+                                       pe_working_set_t *data_set,
+                                       xmlNode *xml, const char *field);
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);
 const char *pe_node_attribute_raw(pe_node_t *node, const char *name);
 bool pe__is_universal_clone(pe_resource_t *rsc,
                             pe_working_set_t *data_set);
 void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
                          enum pe_check_parameters, pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
                                         enum pe_check_parameters,
                                         pe_working_set_t*));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
                                 pe_rule_eval_data_t *rule_data, GHashTable *hash,
                                 const char *always_first, gboolean overwrite,
                                 pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GListPtr pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
 GListPtr pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list);
 GListPtr pe__filter_rsc_list(GListPtr rscs, GListPtr filter);
 
 bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node);
 
 gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 
 #endif
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index 59d5ce8b49..1416cee4dc 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -1,539 +1,549 @@
 /*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_TYPES__H
 #  define PE_TYPES__H
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /*!
  * \file
  * \brief Data types for cluster status
  * \ingroup pengine
  */
 
 #  include <stdbool.h>              // bool
 #  include <sys/types.h>            // time_t
 #  include <glib.h>                 // gboolean, guint, GList, GHashTable
 #  include <crm/crm.h>              // GListPtr
 #  include <crm/common/iso8601.h>
 #  include <crm/pengine/common.h>
 
 typedef struct pe_node_s pe_node_t;
 typedef struct pe_action_s pe_action_t;
 typedef struct pe_resource_s pe_resource_t;
 typedef struct pe_working_set_s pe_working_set_t;
 
 enum pe_obj_types {
     pe_unknown = -1,
     pe_native = 0,
     pe_group = 1,
     pe_clone = 2,
     pe_container = 3,
 };
 
 typedef struct resource_object_functions_s {
     gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
     pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
                                 const pe_node_t *node, int flags);
     /* parameter result must be freed */
     char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
                         pe_working_set_t*);
     void (*print) (pe_resource_t*, const char*, long, void*);
     gboolean (*active) (pe_resource_t*, gboolean);
     enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
     pe_node_t *(*location) (const pe_resource_t*, GList**, int);
     void (*free) (pe_resource_t*);
     void (*count) (pe_resource_t*);
     gboolean (*is_filtered) (pe_resource_t*, GListPtr, gboolean);
 } resource_object_functions_t;
 
 typedef struct resource_alloc_functions_s resource_alloc_functions_t;
 
 enum pe_quorum_policy {
     no_quorum_freeze,
     no_quorum_stop,
     no_quorum_ignore,
     no_quorum_suicide,
     no_quorum_demote
 };
 
 enum node_type {
     node_ping,
     node_member,
     node_remote
 };
 
 //! \deprecated will be removed in a future release
 enum pe_restart {
     pe_restart_restart,
     pe_restart_ignore
 };
 
 //! Determine behavior of pe_find_resource_with_flags()
 enum pe_find {
     pe_find_renamed  = 0x001, //!< match resource ID or LRM history ID
     pe_find_anon     = 0x002, //!< match base name of anonymous clone instances
     pe_find_clone    = 0x004, //!< match only clone instances
     pe_find_current  = 0x008, //!< match resource active on specified node
     pe_find_inactive = 0x010, //!< match resource not running anywhere
     pe_find_any      = 0x020, //!< match base name of any clone instance
 };
 
 // @TODO Make these an enum
 
 #  define pe_flag_have_quorum           0x00000001ULL
 #  define pe_flag_symmetric_cluster     0x00000002ULL
 #  define pe_flag_maintenance_mode      0x00000008ULL
 
 #  define pe_flag_stonith_enabled       0x00000010ULL
 #  define pe_flag_have_stonith_resource 0x00000020ULL
 #  define pe_flag_enable_unfencing      0x00000040ULL
 #  define pe_flag_concurrent_fencing    0x00000080ULL
 
 #  define pe_flag_stop_rsc_orphans      0x00000100ULL
 #  define pe_flag_stop_action_orphans   0x00000200ULL
 #  define pe_flag_stop_everything       0x00000400ULL
 
 #  define pe_flag_start_failure_fatal   0x00001000ULL
 #  define pe_flag_remove_after_stop     0x00002000ULL
 #  define pe_flag_startup_fencing       0x00004000ULL
 #  define pe_flag_shutdown_lock         0x00008000ULL
 
 #  define pe_flag_startup_probes        0x00010000ULL
 #  define pe_flag_have_status           0x00020000ULL
 #  define pe_flag_have_remote_nodes     0x00040000ULL
 
 #  define pe_flag_quick_location        0x00100000ULL
 #  define pe_flag_sanitized             0x00200000ULL
 #  define pe_flag_stdout                0x00400000ULL
 
 //! Don't count total, disabled and blocked resource instances
 #  define pe_flag_no_counts             0x00800000ULL
 
 /*! Skip deprecated code that is kept solely for backward API compatibility.
  * (Internal code should always set this.)
  */
 #  define pe_flag_no_compat             0x01000000ULL
 
 struct pe_working_set_s {
     xmlNode *input;
     crm_time_t *now;
 
     /* options extracted from the input */
     char *dc_uuid;
     pe_node_t *dc_node;
     const char *stonith_action;
     const char *placement_strategy;
 
     unsigned long long flags;
 
     int stonith_timeout;
     enum pe_quorum_policy no_quorum_policy;
 
     GHashTable *config_hash;
     GHashTable *tickets;
 
     // Actions for which there can be only one (e.g. fence nodeX)
     GHashTable *singletons;
 
     GListPtr nodes;
     GListPtr resources;
     GListPtr placement_constraints;
     GListPtr ordering_constraints;
     GListPtr colocation_constraints;
     GListPtr ticket_constraints;
 
     GListPtr actions;
     xmlNode *failed;
     xmlNode *op_defaults;
     xmlNode *rsc_defaults;
 
     /* stats */
     int num_synapse;
     int max_valid_nodes;    //!< Deprecated (will be removed in a future release)
     int order_id;
     int action_id;
 
     /* final output */
     xmlNode *graph;
 
     GHashTable *template_rsc_sets;
     const char *localhost;
     GHashTable *tags;
 
     int blocked_resources;
     int disabled_resources;
 
     GList *param_check; // History entries that need to be checked
     GList *stop_needed; // Containers that need stop actions
     time_t recheck_by;  // Hint to controller to re-run scheduler by this time
     int ninstances;     // Total number of resource instances
     guint shutdown_lock; // How long (seconds) to lock resources to shutdown node
     int priority_fencing_delay; // Priority fencing delay
 };
 
 enum pe_check_parameters {
     /* Clear fail count if parameters changed for un-expired start or monitor
      * last_failure.
      */
     pe_check_last_failure,
 
     /* Clear fail count if parameters changed for start, monitor, promote, or
      * migrate_from actions for active resources.
      */
     pe_check_active,
 };
 
 struct pe_node_shared_s {
     const char *id;
     const char *uname;
     enum node_type type;
 
     /* @TODO convert these flags into a bitfield */
     gboolean online;
     gboolean standby;
     gboolean standby_onfail;
     gboolean pending;
     gboolean unclean;
     gboolean unseen;
     gboolean shutdown;
     gboolean expected_up;
     gboolean is_dc;
     gboolean maintenance;
     gboolean rsc_discovery_enabled;
     gboolean remote_requires_reset;
     gboolean remote_was_fenced;
     gboolean remote_maintenance; /* what the remote-rsc is thinking */
     gboolean unpacked;
 
     int num_resources;
     pe_resource_t *remote_rsc;
     GListPtr running_rsc;       /* pe_resource_t* */
     GListPtr allocated_rsc;     /* pe_resource_t* */
 
     GHashTable *attrs;          /* char* => char* */
     GHashTable *utilization;
     GHashTable *digest_cache;   //!< cache of calculated resource digests
     int priority; // calculated based on the priority of resources running on the node
 };
 
 struct pe_node_s {
     int weight;
     gboolean fixed;
     int count;
     struct pe_node_shared_s *details;
     int rsc_discover_mode;
 };
 
 #  define pe_rsc_orphan                     0x00000001ULL
 #  define pe_rsc_managed                    0x00000002ULL
 #  define pe_rsc_block                      0x00000004ULL
 #  define pe_rsc_orphan_container_filler    0x00000008ULL
 
 #  define pe_rsc_notify                     0x00000010ULL
 #  define pe_rsc_unique                     0x00000020ULL
 #  define pe_rsc_fence_device               0x00000040ULL
 #  define pe_rsc_promotable                 0x00000080ULL
 
 #  define pe_rsc_provisional                0x00000100ULL
 #  define pe_rsc_allocating                 0x00000200ULL
 #  define pe_rsc_merging                    0x00000400ULL
 
 #  define pe_rsc_stop                       0x00001000ULL
 #  define pe_rsc_reload                     0x00002000ULL
 #  define pe_rsc_allow_remote_remotes       0x00004000ULL
 
 #  define pe_rsc_failed                     0x00010000ULL
 #  define pe_rsc_runnable                   0x00040000ULL
 #  define pe_rsc_start_pending              0x00080000ULL
 
 #  define pe_rsc_starting                   0x00100000ULL
 #  define pe_rsc_stopping                   0x00200000ULL
 #  define pe_rsc_allow_migrate              0x00800000ULL
 
 #  define pe_rsc_failure_ignored            0x01000000ULL
 #  define pe_rsc_maintenance                0x04000000ULL
 #  define pe_rsc_is_container               0x08000000ULL
 
 #  define pe_rsc_needs_quorum               0x10000000ULL
 #  define pe_rsc_needs_fencing              0x20000000ULL
 #  define pe_rsc_needs_unfencing            0x40000000ULL
 
 enum pe_graph_flags {
     pe_graph_none = 0x00000,
     pe_graph_updated_first = 0x00001,
     pe_graph_updated_then = 0x00002,
     pe_graph_disable = 0x00004,
 };
 
 /* *INDENT-OFF* */
 enum pe_action_flags {
     pe_action_pseudo = 0x00001,
     pe_action_runnable = 0x00002,
     pe_action_optional = 0x00004,
     pe_action_print_always = 0x00008,
 
     pe_action_have_node_attrs = 0x00010,
     pe_action_implied_by_stonith = 0x00040,
     pe_action_migrate_runnable =   0x00080,
 
     pe_action_dumped = 0x00100,
     pe_action_processed = 0x00200,
     pe_action_clear = 0x00400,
     pe_action_dangle = 0x00800,
 
     /* This action requires one or more of its dependencies to be runnable.
      * We use this to clear the runnable flag before checking dependencies.
      */
     pe_action_requires_any = 0x01000,
 
     pe_action_reschedule = 0x02000,
     pe_action_tracking = 0x04000,
     pe_action_dedup = 0x08000, //!< Internal state tracking when creating graph

     pe_action_dc = 0x10000,         //!< Action may run on DC instead of target
 };
 /* *INDENT-ON* */
 
 struct pe_resource_s {
     char *id;
     char *clone_name;
     xmlNode *xml;
     xmlNode *orig_xml;
     xmlNode *ops_xml;
 
     pe_working_set_t *cluster;
     pe_resource_t *parent;
 
     enum pe_obj_types variant;
     void *variant_opaque;
     resource_object_functions_t *fns;
     resource_alloc_functions_t *cmds;
 
     enum rsc_recovery_type recovery_type;
 
     // @TODO only pe_restart_restart is of interest, so merge into flags
     enum pe_restart restart_type; //!< \deprecated will be removed in future release
 
     int priority;
     int stickiness;
     int sort_index;
     int failure_timeout;
     int migration_threshold;
     guint remote_reconnect_ms;
     char *pending_task;
 
     unsigned long long flags;
 
     // @TODO merge these into flags
     gboolean is_remote_node;
     gboolean exclusive_discover;
 
     //!@{
     //! This field should be treated as internal to Pacemaker
     GListPtr rsc_cons_lhs;      // List of pcmk__colocation_t*
     GListPtr rsc_cons;          // List of pcmk__colocation_t*
     GListPtr rsc_location;      // List of pe__location_t*
     GListPtr actions;           // List of pe_action_t*
     GListPtr rsc_tickets;       // List of rsc_ticket*
     //!@}
 
     pe_node_t *allocated_to;
     pe_node_t *partial_migration_target;
     pe_node_t *partial_migration_source;
     GListPtr running_on;        /* pe_node_t*   */
     GHashTable *known_on;       /* pe_node_t*   */
     GHashTable *allowed_nodes;  /* pe_node_t*   */
 
     enum rsc_role_e role;
     enum rsc_role_e next_role;
 
     GHashTable *meta;
-    GHashTable *parameters;
+    GHashTable *parameters; //!< \deprecated Use pe_rsc_params() instead
     GHashTable *utilization;
 
     GListPtr children;          /* pe_resource_t*   */
     GListPtr dangling_migrations;       /* pe_node_t*       */
 
     pe_resource_t *container;
     GListPtr fillers;
 
     pe_node_t *pending_node;    // Node on which pending_task is happening
     pe_node_t *lock_node;       // Resource is shutdown-locked to this node
     time_t lock_time;           // When shutdown lock started
 
+    /* Resource parameters may have node-attribute-based rules, which means the
+     * values can vary by node. This table is a cache of parameter name/value
+     * tables for each node (as needed). Use pe_rsc_params() to get the table
+     * for a given node.
+     */
+    GHashTable *parameter_cache; // Key = node name, value = parameters table
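+    /* For example (illustrative only):
+     *   GHashTable *params = pe_rsc_params(rsc, node, data_set);
+     *   const char *value = g_hash_table_lookup(params, "param-name");
+     */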
 #if ENABLE_VERSIONED_ATTRS
     xmlNode *versioned_parameters;
 #endif
 };
 
 #if ENABLE_VERSIONED_ATTRS
 // Used as action->action_details if action->rsc is not NULL
 typedef struct pe_rsc_action_details_s {
     xmlNode *versioned_parameters;
     xmlNode *versioned_meta;
 } pe_rsc_action_details_t;
 #endif
 
 struct pe_action_s {
     int id;
     int priority;
 
     pe_resource_t *rsc;
     pe_node_t *node;
     xmlNode *op_entry;
 
     char *task;
     char *uuid;
     char *cancel_task;
     char *reason;
 
     enum pe_action_flags flags;
     enum rsc_start_requirement needs;
     enum action_fail_response on_fail;
     enum rsc_role_e fail_role;
 
     GHashTable *meta;
     GHashTable *extra;
 
     /*
      * These two variables are associated with the constraint logic
      * that involves first having one or more actions runnable before
      * then allowing this action to execute.
      *
      * These variables are used with features such as 'clone-min', which
      * requires at minimum X number of cloned instances to be running
      * before an order dependency can run. Another option that uses
      * this is 'require-all=false' in ordering constraints. This option
      * says "only require one instance of a resource to start before
      * allowing dependencies to start" -- basically, require-all=false is
      * the same as clone-min=1.
      */
 
     /* current number of known runnable actions in the before list. */
     int runnable_before;
     /* the number of "before" runnable actions required for this action
      * to be considered runnable */ 
     int required_runnable_before;
 
     GListPtr actions_before;    /* pe_action_wrapper_t* */
     GListPtr actions_after;     /* pe_action_wrapper_t* */
 
     /* Some of the above fields could be moved to the details,
      * except for API backward compatibility.
      */
     void *action_details; // varies by type of action
 };
 
 typedef struct pe_ticket_s {
     char *id;
     gboolean granted;
     time_t last_granted;
     gboolean standby;
     GHashTable *state;
 } pe_ticket_t;
 
 typedef struct pe_tag_s {
     char *id;
     GListPtr refs;
 } pe_tag_t;
 
 //! Internal tracking for transition graph creation
 enum pe_link_state {
     pe_link_not_dumped, //!< Internal tracking for transition graph creation
     pe_link_dumped,     //!< Internal tracking for transition graph creation
     pe_link_dup,        //!< \deprecated No longer used by Pacemaker
 };
 
 enum pe_discover_e {
     pe_discover_always = 0,
     pe_discover_never,
     pe_discover_exclusive,
 };
 
 /* *INDENT-OFF* */
 enum pe_ordering {
     pe_order_none                  = 0x0,       /* deleted */
     pe_order_optional              = 0x1,       /* pure ordering, nothing implied */
     pe_order_apply_first_non_migratable = 0x2,  /* Only apply this constraint's ordering if first is not migratable. */
 
     pe_order_implies_first         = 0x10,      /* If 'then' is required, ensure 'first' is too */
     pe_order_implies_then          = 0x20,      /* If 'first' is required, ensure 'then' is too */
     pe_order_implies_first_master  = 0x40,      /* Imply 'first' is required when 'then' is required and then's rsc holds Master role. */
 
     /* first requires then to be both runnable and migrate runnable. */
     pe_order_implies_first_migratable  = 0x80,
 
     pe_order_runnable_left         = 0x100,     /* 'then' requires 'first' to be runnable */
 
     pe_order_pseudo_left           = 0x200,     /* 'then' can only be pseudo if 'first' is runnable */
     pe_order_implies_then_on_node  = 0x400,     /* If 'first' is required on 'nodeX',
                                                  * ensure instances of 'then' on 'nodeX' are too.
                                                  * Only really useful if 'then' is a clone and 'first' is not
                                                  */
     pe_order_probe                 = 0x800,     /* If 'first->rsc' is
                                                  *  - running but about to stop, ignore the constraint
                                                  *  - otherwise, behave as runnable_left
                                                  */
 
     pe_order_restart               = 0x1000,    /* 'then' is runnable if 'first' is optional or runnable */
     pe_order_stonith_stop          = 0x2000,    /* only applies if the action is non-pseudo */
     pe_order_serialize_only        = 0x4000,    /* serialize */
     pe_order_same_node             = 0x8000,    /* applies only if 'first' and 'then' are on same node */
 
     pe_order_implies_first_printed = 0x10000,   /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
     pe_order_implies_then_printed  = 0x20000,   /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
 
     pe_order_asymmetrical          = 0x100000,  /* Indicates asymmetrical one way ordering constraint. */
     pe_order_load                  = 0x200000,  /* Only relevant if... */
     pe_order_one_or_more           = 0x400000,  /* 'then' is runnable only if one or more of its dependencies are too */
     pe_order_anti_colocation       = 0x800000,
 
     pe_order_preserve              = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
     pe_order_then_cancels_first    = 0x2000000, // if 'then' becomes required, 'first' becomes optional
     pe_order_trace                 = 0x4000000, /* test marker */
 };
 /* *INDENT-ON* */
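 
 /* Illustrative sketch (not part of this change): pe_ordering values are bit
  * flags, typically OR'd together when registering a constraint via
  * custom_action_order(), as done elsewhere in this tree. The resource
  * variables here are assumptions, for illustration only.
  */
 #if 0
 custom_action_order(rsc_first, start_key(rsc_first), NULL,
                     rsc_then, start_key(rsc_then), NULL,
                     pe_order_optional|pe_order_preserve, data_set);
 #endif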
 
 typedef struct pe_action_wrapper_s {
     enum pe_ordering type;
     enum pe_link_state state;
     pe_action_t *action;
 } pe_action_wrapper_t;
 
 #ifndef PCMK__NO_COMPAT
 /* Everything here is deprecated and kept only for public API backward
  * compatibility. It will be moved to compatibility.h in a future release.
  */
 
 //!< \deprecated Use pe_action_t instead
 typedef struct pe_action_s action_t;
 //!< \deprecated Use pe_action_wrapper_t instead
 typedef struct pe_action_wrapper_s action_wrapper_t;
 //!< \deprecated Use pe_node_t instead
 typedef struct pe_node_s node_t;
 //!< \deprecated Use enum pe_quorum_policy instead
 typedef enum pe_quorum_policy no_quorum_policy_t;
 //!< \deprecated Use pe_resource_t instead
 typedef struct pe_resource_s resource_t;
 //!< \deprecated Use pe_tag_t instead
 typedef struct pe_tag_s tag_t;
 //!< \deprecated Use pe_ticket_t instead
 typedef struct pe_ticket_s ticket_t;
 
 #endif
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PE_TYPES__H
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index a0fb5abc6f..22f284af49 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,3062 +1,3062 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 CRM_TRACE_INIT_DATA(pacemaker);
 
 void set_alloc_actions(pe_working_set_t * data_set);
 extern void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
 extern gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
 static void apply_remote_node_ordering(pe_working_set_t *data_set);
 static enum remote_connection_state get_remote_node_state(pe_node_t *node);
 
 enum remote_connection_state {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static const char *
 state2text(enum remote_connection_state state)
 {
     switch (state) {
         case remote_state_unknown:
             return "unknown";
         case remote_state_alive:
             return "alive";
         case remote_state_resting:
             return "resting";
         case remote_state_failed:
             return "failed";
         case remote_state_stopped:
             return "stopped";
     }
 
     return "impossible";
 }
 
 resource_alloc_functions_t resource_class_alloc_functions[] = {
     {
      pcmk__native_merge_weights,
      pcmk__native_allocate,
      native_create_actions,
      native_create_probe,
      native_internal_constraints,
      native_rsc_colocation_lh,
      native_rsc_colocation_rh,
      native_rsc_location,
      native_action_flags,
      native_update_actions,
      native_expand,
      native_append_meta,
      },
     {
      pcmk__group_merge_weights,
      pcmk__group_allocate,
      group_create_actions,
      native_create_probe,
      group_internal_constraints,
      group_rsc_colocation_lh,
      group_rsc_colocation_rh,
      group_rsc_location,
      group_action_flags,
      group_update_actions,
      group_expand,
      group_append_meta,
      },
     {
      pcmk__native_merge_weights,
      pcmk__clone_allocate,
      clone_create_actions,
      clone_create_probe,
      clone_internal_constraints,
      clone_rsc_colocation_lh,
      clone_rsc_colocation_rh,
      clone_rsc_location,
      clone_action_flags,
      pcmk__multi_update_actions,
      clone_expand,
      clone_append_meta,
      },
     {
      pcmk__native_merge_weights,
      pcmk__bundle_allocate,
      pcmk__bundle_create_actions,
      pcmk__bundle_create_probe,
      pcmk__bundle_internal_constraints,
      pcmk__bundle_rsc_colocation_lh,
      pcmk__bundle_rsc_colocation_rh,
      pcmk__bundle_rsc_location,
      pcmk__bundle_action_flags,
      pcmk__multi_update_actions,
      pcmk__bundle_expand,
      pcmk__bundle_append_meta,
      }
 };
 
 gboolean
 update_action_flags(pe_action_t * action, enum pe_action_flags flags, const char *source, int line)
 {
     static unsigned long calls = 0;
     gboolean changed = FALSE;
     gboolean clear = pcmk_is_set(flags, pe_action_clear);
     enum pe_action_flags last = action->flags;
 
     if (clear) {
         pe__clear_action_flags_as(source, line, action, flags);
     } else {
         pe__set_action_flags_as(source, line, action, flags);
     }
 
     if (last != action->flags) {
         calls++;
         changed = TRUE;
         /* Useful for tracking down _who_ changed a specific flag */
         /* CRM_ASSERT(calls != 534); */
         pe__clear_raw_action_flags(flags, "action update", pe_action_clear);
         crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
                   action->uuid, action->node ? action->node->details->uname : "[none]",
                   clear ? "un-" : "", flags, last, action->flags, calls, source);
     }
 
     return changed;
 }
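 
 /* Illustrative usage (not part of this change; mirrors calls elsewhere in
  * this file): OR in pe_action_clear to clear flags instead of setting them.
  */
 #if 0
 update_action_flags(action, pe_action_runnable, __func__, __LINE__);  // set
 update_action_flags(action, pe_action_runnable|pe_action_clear,
                     __func__, __LINE__);                              // clear
 #endif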
 
 static gboolean
 check_rsc_parameters(pe_resource_t * rsc, pe_node_t * node, xmlNode * rsc_entry,
                      gboolean active_here, pe_working_set_t * data_set)
 {
     int attr_lpc = 0;
     gboolean force_restart = FALSE;
     gboolean delete_resource = FALSE;
     gboolean changed = FALSE;
 
     const char *value = NULL;
     const char *old_value = NULL;
 
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
     for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
         value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
         old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
         if (value == old_value  /* i.e. NULL */
             || pcmk__str_eq(value, old_value, pcmk__str_none)) {
             continue;
         }
 
         changed = TRUE;
         trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
         if (active_here) {
             force_restart = TRUE;
             crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
                        rsc->id, node->details->uname, attr_list[attr_lpc],
                        crm_str(old_value), crm_str(value));
         }
     }
     if (force_restart) {
         /* make sure the restart happens */
         stop_action(rsc, node, FALSE);
         pe__set_resource_flags(rsc, pe_rsc_start_pending);
         delete_resource = TRUE;
 
     } else if (changed) {
         delete_resource = TRUE;
     }
     return delete_resource;
 }
 
 static void
 CancelXmlOp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * active_node,
             const char *reason, pe_working_set_t * data_set)
 {
     guint interval_ms = 0;
     pe_action_t *cancel = NULL;
 
     const char *task = NULL;
     const char *call_id = NULL;
 
     CRM_CHECK(xml_op != NULL, return);
     CRM_CHECK(active_node != NULL, return);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
     crm_info("Action " PCMK__OP_FMT " on %s will be stopped: %s",
              rsc->id, task, interval_ms,
              active_node->details->uname, (reason? reason : "unknown"));
 
     cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
     custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
 }
 
 static gboolean
 check_action_definition(pe_resource_t * rsc, pe_node_t * active_node, xmlNode * xml_op,
                         pe_working_set_t * data_set)
 {
     char *key = NULL;
     guint interval_ms = 0;
     const op_digest_cache_t *digest_data = NULL;
     gboolean did_change = FALSE;
 
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *digest_secure = NULL;
 
     CRM_CHECK(active_node != NULL, return FALSE);
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     if (interval_ms > 0) {
         xmlNode *op_match = NULL;
 
         /* we need to reconstruct the key because of the way we used to construct resource IDs */
         key = pcmk__op_key(rsc->id, task, interval_ms);
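         /* For example, a 10-second monitor of "myrsc" is assumed to produce
          * the key "myrsc_monitor_10000" (resource ID, task, interval in ms,
          * matching the PCMK__OP_FMT arguments used in this file).
          */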
 
         pe_rsc_trace(rsc, "Checking parameters for %s", key);
         op_match = find_rsc_op_entry(rsc, key);
 
         if ((op_match == NULL)
             && pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)) {
             CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
             free(key);
             return TRUE;
 
         } else if (op_match == NULL) {
             pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
             free(key);
             return TRUE;
         }
         free(key);
         key = NULL;
     }
 
     crm_trace("Testing " PCMK__OP_FMT " on %s",
               rsc->id, task, interval_ms, active_node->details->uname);
     if ((interval_ms == 0) && pcmk__str_eq(task, RSC_STATUS, pcmk__str_casei)) {
         /* Reload based on the start action not a probe */
         task = RSC_START;
 
     } else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_MIGRATED, pcmk__str_casei)) {
         /* Reload based on the start action not a migrate */
         task = RSC_START;
     } else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_PROMOTE, pcmk__str_casei)) {
         /* Reload based on the start action not a promote */
         task = RSC_START;
     }
 
     digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
         digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
     }
 
     if(digest_data->rc != RSC_DIGEST_MATCH
        && digest_secure
        && digest_data->digest_secure_calc
        && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
         if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
             printf("Only 'private' parameters to " PCMK__OP_FMT
                    " on %s changed: %s\n",
                    rsc->id, task, interval_ms, active_node->details->uname,
                    crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         }
 
     } else if (digest_data->rc == RSC_DIGEST_RESTART) {
         /* Changes that force a restart */
         pe_action_t *required = NULL;
 
         did_change = TRUE;
         key = pcmk__op_key(rsc->id, task, interval_ms);
         crm_log_xml_info(digest_data->params_restart, "params:restart");
         required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
         pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
                                   "resource definition change", pe_action_optional, TRUE);
 
         trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
 
     } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
         /* Changes that can potentially be handled by a reload */
         const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
         did_change = TRUE;
         trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
         crm_log_xml_info(digest_data->params_all, "params:reload");
         key = pcmk__op_key(rsc->id, task, interval_ms);
 
         if (interval_ms > 0) {
             pe_action_t *op = NULL;
 
 #if 0
             /* Always reload/restart the entire resource */
             ReloadRsc(rsc, active_node, data_set);
 #else
             /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
             op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
             pe__set_action_flags(op, pe_action_reschedule);
 #endif
 
         } else if (digest_restart) {
             pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
 
             /* Reload this resource */
             ReloadRsc(rsc, active_node, data_set);
             free(key);
 
         } else {
             pe_action_t *required = NULL;
             pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
 
             /* Re-send the start/demote/promote op
              * Recurring ops will be detected independently
              */
             required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
             pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
                                       "resource definition change", pe_action_optional, TRUE);
         }
     }
 
     return did_change;
 }
 
 /*!
  * \internal
  * \brief Do deferred action checks after allocation
  *
  * \param[in] rsc       Resource whose action is being checked
  * \param[in] node      Node where the action's history entry resides
  * \param[in] rsc_op    Resource history entry for the action
  * \param[in] check     Type of deferred check to perform
  * \param[in] data_set  Working set for cluster
  */
 static void
 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
              enum pe_check_parameters check, pe_working_set_t *data_set)
 {
     const char *reason = NULL;
     op_digest_cache_t *digest_data = NULL;
 
     switch (check) {
         case pe_check_active:
             if (check_action_definition(rsc, node, rsc_op, data_set)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
 
                 reason = "action definition changed";
             }
             break;
 
         case pe_check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s has no digest to compare",
                               rsc->id, ID(rsc_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
 
     if (reason) {
         pe__clear_failcount(rsc, node, reason, data_set);
     }
 }
 
 static void
 check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int offset = -1;
     int stop_index = 0;
     int start_index = 0;
 
     const char *task = NULL;
 
     xmlNode *rsc_op = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
 
     CRM_CHECK(node != NULL, return);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         pe_resource_t *parent = uber_parent(rsc);
         if(parent == NULL
            || pe_rsc_is_clone(parent) == FALSE
            || pcmk_is_set(parent->flags, pe_rsc_unique)) {
             pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
             DeleteRsc(rsc, node, FALSE, data_set);
         } else {
             pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
         }
         return;
 
     } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
             DeleteRsc(rsc, node, FALSE, data_set);
         }
         pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
 
     if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
         DeleteRsc(rsc, node, FALSE, data_set);
     }
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
         guint interval_ms = 0;
 
         offset++;
 
         if (start_index < stop_index) {
             /* stopped */
             continue;
         } else if (offset < start_index) {
             /* action occurred prior to a start */
             continue;
         }
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
         if ((interval_ms > 0) &&
             (pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
             // Maintenance mode cancels recurring operations
             CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
 
         } else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
                                                              RSC_PROMOTE, RSC_MIGRATED, NULL)) {
             /* If a resource operation failed, and the operation's definition
              * has changed, clear any fail count so it can be retried afresh.
              */
 
-            if (pe__bundle_needs_remote_name(rsc)) {
+            if (pe__bundle_needs_remote_name(rsc, data_set)) {
                 /* We haven't allocated resources to nodes yet, so if the
                  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
                  * based on the literal "#uname" value rather than the properly
                  * substituted value. That would mistakenly make the action
                  * definition appear to have been changed. Defer the check until
                  * later in this case.
                  */
                 pe__add_param_check(rsc_op, rsc, node, pe_check_active,
                                     data_set);
 
             } else if (check_action_definition(rsc, node, rsc_op, data_set)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
                 pe__clear_failcount(rsc, node, "action definition changed",
                                     data_set);
             }
         }
     }
     g_list_free(sorted_op_list);
 }
 
 static GListPtr
 find_rsc_list(GListPtr result, pe_resource_t * rsc, const char *id, gboolean renamed_clones,
               gboolean partial, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     gboolean match = FALSE;
 
     if (id == NULL) {
         return NULL;
     }
 
     if (rsc == NULL) {
         if (data_set == NULL) {
             return NULL;
         }
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial,
                                    NULL);
         }
         return result;
     }
 
     if (partial) {
         if (strstr(rsc->id, id)) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
             match = TRUE;
         }
 
     } else {
         if (strcmp(rsc->id, id) == 0) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
             match = TRUE;
         }
     }
 
     if (match) {
         result = g_list_prepend(result, rsc);
     }
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
     }
 
     return result;
 }
 
 static void
 check_actions(pe_working_set_t * data_set)
 {
     const char *id = NULL;
     pe_node_t *node = NULL;
     xmlNode *lrm_rscs = NULL;
     xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
 
     xmlNode *node_state = NULL;
 
     for (node_state = pcmk__xe_first_child(status); node_state != NULL;
          node_state = pcmk__xe_next(node_state)) {
 
         if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE,
                          pcmk__str_none)) {
             id = crm_element_value(node_state, XML_ATTR_ID);
             lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
             lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
 
             node = pe_find_node_id(data_set->nodes, id);
 
             if (node == NULL) {
                 continue;
 
             /* Still need to check actions for a maintenance node to cancel existing monitor operations */
             } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
                 crm_trace("Skipping param check for %s: can't run resources",
                           node->details->uname);
                 continue;
             }
 
             crm_trace("Processing node %s", node->details->uname);
             if (node->details->online
                 || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 xmlNode *rsc_entry = NULL;
 
                 for (rsc_entry = pcmk__xe_first_child(lrm_rscs);
                      rsc_entry != NULL;
                      rsc_entry = pcmk__xe_next(rsc_entry)) {
 
                     if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
 
                         if (xml_has_children(rsc_entry)) {
                             GListPtr gIter = NULL;
                             GListPtr result = NULL;
                             const char *rsc_id = ID(rsc_entry);
 
                             CRM_CHECK(rsc_id != NULL, return);
 
                             result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
                             for (gIter = result; gIter != NULL; gIter = gIter->next) {
                                 pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
                                 if (rsc->variant != pe_native) {
                                     continue;
                                 }
                                 check_actions_for(rsc_entry, rsc, node, data_set);
                             }
                             g_list_free(result);
                         }
                     }
                 }
             }
         }
     }
 }
 
 static void
 apply_placement_constraints(pe_working_set_t * data_set)
 {
     for (GList *gIter = data_set->placement_constraints;
          gIter != NULL; gIter = gIter->next) {
         pe__location_t *cons = gIter->data;
 
         cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
     }
 }
 
 static gboolean
 failcount_clear_action_exists(pe_node_t * node, pe_resource_t * rsc)
 {
     gboolean rc = FALSE;
     GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
 
     if (list) {
         rc = TRUE;
     }
     g_list_free(list);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Force resource away if failures hit migration threshold
  *
  * \param[in,out] rsc       Resource to check for failures
  * \param[in,out] node      Node to check for failures
  * \param[in,out] data_set  Cluster working set to update
  */
 static void
 check_migration_threshold(pe_resource_t *rsc, pe_node_t *node,
                           pe_working_set_t *data_set)
 {
     int fail_count, countdown;
     pe_resource_t *failed;
 
     /* Migration threshold of 0 means never force away */
     if (rsc->migration_threshold == 0) {
         return;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return;
     }
 
     /* If there are no failures, there's no need to force away */
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL,
                                   data_set);
     if (fail_count <= 0) {
         return;
     }
 
     /* How many more times recovery will be tried on this node */
     countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
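     /* For example, migration-threshold=3 with fail_count=1 leaves a countdown
      * of 2 permitted failures on this node before the resource is forced away
      */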
 
     /* If failed resource has a parent, we'll force the parent away */
     failed = rsc;
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         failed = uber_parent(rsc);
     }
 
     if (countdown == 0) {
         resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
         crm_warn("Forcing %s away from %s after %d failures (max=%d)",
                  failed->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
     } else {
         crm_info("%s can fail %d more times on %s before being forced off",
                  failed->id, countdown, node->details->uname);
     }
 }
 
 static void
 common_apply_stickiness(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
 {
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             common_apply_stickiness(child_rsc, node, data_set);
         }
         return;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_managed)
         && rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
         pe_node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
         if (current == NULL) {
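             /* Resource is not running on this node, so stickiness toward it
              * does not apply */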
 
         } else if ((match != NULL)
                    || pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
             pe_resource_t *sticky_rsc = rsc;
 
             resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
             pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
                          " (node=%s, weight=%d)", sticky_rsc->id,
                          node->details->uname, rsc->stickiness);
         } else {
             GHashTableIter iter;
             pe_node_t *nIter = NULL;
 
             pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
                          " and node %s is not explicitly allowed", rsc->id, node->details->uname);
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
                 crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
             }
         }
     }
 
     /* Check the migration threshold only if a failcount clear action
      * has not already been placed for this resource on the node.
      * There is no sense in potentially forcing the resource from this
      * node if the failcount is being reset anyway.
      *
      * @TODO A clear_failcount operation can be scheduled in stage4() via
      * check_actions_for(), or in stage5() via check_params(). This runs in
      * stage2(), so it cannot detect those, meaning we might check the migration
      * threshold when we shouldn't -- worst case, we stop or move the resource,
      * then move it back next transition.
      */
     if (failcount_clear_action_exists(node, rsc) == FALSE) {
         check_migration_threshold(rsc, node, data_set);
     }
 }
 
 void
 complex_set_cmds(pe_resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     rsc->cmds = &resource_class_alloc_functions[rsc->variant];
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         complex_set_cmds(child_rsc);
     }
 }
 
 void
 set_alloc_actions(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         complex_set_cmds(rsc);
     }
 }
 
 static void
 calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
 {
     const char *key = (const char *)gKey;
     const char *value = (const char *)gValue;
     int *system_health = (int *)user_data;
 
     if (!gKey || !gValue || !user_data) {
         return;
     }
 
     if (pcmk__starts_with(key, "#health")) {
         int score;
 
         /* Convert the value into an integer */
         score = char2score(value);
 
         /* Add it to the running total */
         *system_health = pe__add_scores(score, *system_health);
     }
 }
 
 static gboolean
 apply_system_health(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
     int base_health = 0;
 
     if (pcmk__str_eq(health_strategy, "none", pcmk__str_null_matches | pcmk__str_casei)) {
         /* Prevent any accidental health -> score translation */
         pcmk__score_red = 0;
         pcmk__score_yellow = 0;
         pcmk__score_green = 0;
         return TRUE;
 
     } else if (pcmk__str_eq(health_strategy, "migrate-on-red", pcmk__str_casei)) {
 
         /* Resources on nodes which have health values of red are
          * weighted away from that node.
          */
         pcmk__score_red = -INFINITY;
         pcmk__score_yellow = 0;
         pcmk__score_green = 0;
 
     } else if (pcmk__str_eq(health_strategy, "only-green", pcmk__str_casei)) {
 
         /* Resources on nodes which have health values of red or yellow
          * are forced away from that node.
          */
         pcmk__score_red = -INFINITY;
         pcmk__score_yellow = -INFINITY;
         pcmk__score_green = 0;
 
     } else if (pcmk__str_eq(health_strategy, "progressive", pcmk__str_casei)) {
         /* Same as above, but use the red/yellow/green scores provided by the
          * user (defaults come from the pe_prefs table). A custom health
          * "base score" can also be used.
          */
         base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
 
     } else if (pcmk__str_eq(health_strategy, "custom", pcmk__str_casei)) {
 
         /* Requires the admin to configure the rsc_location constraints for
          * processing the stored health scores
          */
         /* TODO: Check for the existence of appropriate node health constraints */
         return TRUE;
 
     } else {
         crm_err("Unknown node health strategy: %s", health_strategy);
         return FALSE;
     }
 
     crm_info("Applying automated node health strategy: %s", health_strategy);
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         int system_health = base_health;
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         /* Search through the node hash table for system health entries. */
         g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
 
         crm_info(" Node %s has a combined system health of %d",
                  node->details->uname, system_health);
 
         /* If the health is non-zero, then create a new rsc2node so that the
          * weight will be added later on.
          */
         if (system_health != 0) {
 
             GListPtr gIter2 = data_set->resources;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
                 rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
             }
         }
     }
 
     return TRUE;
 }
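 
 /* Worked example (attribute values assumed, for illustration): with
  * node-health-strategy=only-green, a node attribute such as
  * #health-disk="yellow" is converted by char2score() to pcmk__score_yellow
  * (-INFINITY above), so calculate_system_health() drives that node's combined
  * health to -INFINITY and apply_system_health() adds a matching location
  * preference for every resource via rsc2node_new().
  */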
 
 gboolean
 stage0(pe_working_set_t * data_set)
 {
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     if (data_set->input == NULL) {
         return FALSE;
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
         crm_trace("Calculating status");
         cluster_status(data_set);
     }
 
     set_alloc_actions(data_set);
     apply_system_health(data_set);
     unpack_constraints(cib_constraints, data_set);
 
     return TRUE;
 }
 
 /*
  * Check nodes for resources started outside of the LRM
  */
 gboolean
 probe_resources(pe_working_set_t * data_set)
 {
     pe_action_t *probe_node_complete = NULL;
 
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
 
         if (node->details->online == FALSE) {
 
             if (pe__is_remote_node(node) && node->details->remote_rsc
                 && (get_remote_node_state(node) == remote_state_failed)) {
 
                 pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE);
             }
             continue;
 
         } else if (node->details->unclean) {
             continue;
 
         } else if (node->details->rsc_discovery_enabled == FALSE) {
             /* resource discovery is disabled for this node */
             continue;
         }
 
         if (probed != NULL && crm_is_true(probed) == FALSE) {
             pe_action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                                   CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
 
             add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
             continue;
         }
 
         for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
             rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
         }
     }
     return TRUE;
 }
 
 static void
 rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
 {
     GListPtr gIter = rsc->children;
     pe_resource_t *top = uber_parent(rsc);
     pe_node_t *match;
 
     if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         rsc_discover_filter(child_rsc, node);
     }
 
     match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match && match->rsc_discover_mode != pe_discover_exclusive) {
         match->weight = -INFINITY;
     }
 }
 
 static time_t
 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
     time_t result = 0;
 
     if (shutdown) {
         errno = 0;
         result = (time_t) crm_parse_ll(shutdown, NULL);
         if (errno != 0) {
             result = 0;
         }
     }
     return result? result : get_effective_time(data_set);
 }
 
 static void
 apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *class;
 
     // Only primitives and (uncloned) groups may be locked
     if (rsc->variant == pe_group) {
         for (GList *item = rsc->children; item != NULL;
              item = item->next) {
             apply_shutdown_lock((pe_resource_t *) item->data, data_set);
         }
     } else if (rsc->variant != pe_native) {
         return;
     }
 
     // Fence devices and remote connections can't be locked
     class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pe__resource_is_remote_conn(rsc, data_set)) {
         return;
     }
 
     if (rsc->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->running_on != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pe_rsc_info(rsc,
                         "Cancelling shutdown lock because %s is already active",
                         rsc->id);
             pe__clear_resource_history(rsc, rsc->lock_node, data_set);
             rsc->lock_node = NULL;
             rsc->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *node = rsc->running_on->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                              rsc->id, node->details->uname);
             } else {
                 rsc->lock_node = node;
                 rsc->lock_time = shutdown_time(node, data_set);
             }
         }
     }
 
     if (rsc->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (data_set->shutdown_lock > 0) {
         time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
 
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                     rsc->id, rsc->lock_node->details->uname,
                     (long long) lock_expiration);
         pe__update_recheck_time(++lock_expiration, data_set);
     } else {
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                     rsc->id, rsc->lock_node->details->uname);
     }
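 
     /* Example (times assumed): with a lock duration of 600s and a shutdown
      * recorded at t=1000, lock_expiration is 1600, and the recheck time is
      * set just past expiry (1601) so a new transition is calculated promptly.
      */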
 
     // If resource is locked to one node, ban it from all other nodes
     for (GList *item = data_set->nodes; item != NULL; item = item->next) {
         pe_node_t *node = item->data;
 
         if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
             resource_location(rsc, node, -CRM_SCORE_INFINITY,
                               XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Stage 2 of cluster status: apply node-specific criteria
  *
  * Count known nodes, and apply location constraints, stickiness, and exclusive
  * resource discovery.
  */
 gboolean
 stage2(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             apply_shutdown_lock((pe_resource_t *) gIter->data, data_set);
         }
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
         // @COMPAT API backward compatibility
         for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (node && (node->weight >= 0) && node->details->online
                 && (node->details->type != node_ping)) {
                 data_set->max_valid_nodes++;
             }
         }
     }
 
     apply_placement_constraints(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         GListPtr gIter2 = NULL;
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         gIter2 = data_set->resources;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
             common_apply_stickiness(rsc, node, data_set);
             rsc_discover_filter(rsc, node);
         }
     }
 
     return TRUE;
 }
 
 /*
  * Create internal resource constraints before allocation
  */
 gboolean
 stage3(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         rsc->cmds->internal_constraints(rsc, data_set);
     }
 
     return TRUE;
 }
 
 /*
  * Check for orphaned or redefined actions
  */
 gboolean
 stage4(pe_working_set_t * data_set)
 {
     check_actions(data_set);
     return TRUE;
 }
 
 static void *
 convert_const_pointer(const void *ptr)
 {
     /* Worst function ever */
     return (void *)ptr;
 }
 
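 /* Compare two resources for allocation order: higher priority wins, then
  * higher weight on the resource's current node, then higher weight on each
  * node of the supplied (weight-sorted) node list.
  */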
 static gint
 sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
 {
     int rc = 0;
     int r1_weight = -INFINITY;
     int r2_weight = -INFINITY;
 
     const char *reason = "existence";
 
     const GListPtr nodes = (GListPtr) data;
     const pe_resource_t *resource1 = a;
     const pe_resource_t *resource2 = b;
 
     pe_node_t *r1_node = NULL;
     pe_node_t *r2_node = NULL;
     GListPtr gIter = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
 
     reason = "priority";
     r1_weight = resource1->priority;
     r2_weight = resource2->priority;
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
                                           resource1->id, NULL, NULL, 1,
                                           pe_weights_forward | pe_weights_init);
     pe__show_node_weights(true, NULL, resource1->id, r1_nodes);
 
     r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
                                           resource2->id, NULL, NULL, 1,
                                           pe_weights_forward | pe_weights_init);
     pe__show_node_weights(true, NULL, resource2->id, r2_nodes);
 
     /* Current location score */
     reason = "current location";
     r1_weight = -INFINITY;
     r2_weight = -INFINITY;
 
     if (resource1->running_on) {
         r1_node = pe__current_node(resource1);
         r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
         if (r1_node != NULL) {
             r1_weight = r1_node->weight;
         }
     }
     if (resource2->running_on) {
         r2_node = pe__current_node(resource2);
         r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
         if (r2_node != NULL) {
             r2_weight = r2_node->weight;
         }
     }
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "score";
     for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         r1_node = NULL;
         r2_node = NULL;
 
         r1_weight = -INFINITY;
         if (r1_nodes) {
             r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
         }
         if (r1_node) {
             r1_weight = r1_node->weight;
         }
 
         r2_weight = -INFINITY;
         if (r2_nodes) {
             r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
         }
         if (r2_node) {
             r2_weight = r2_node->weight;
         }
 
         if (r1_weight > r2_weight) {
             rc = -1;
             goto done;
         }
 
         if (r1_weight < r2_weight) {
             rc = 1;
             goto done;
         }
     }
 
   done:
     crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
               resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
               rc < 0 ? '>' : rc > 0 ? '<' : '=',
               resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
 
     if (r1_nodes) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes) {
         g_hash_table_destroy(r2_nodes);
     }
 
     return rc;
 }
 
 static void
 allocate_resources(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Allocate remote connection resources first (which will also allocate
          * any colocation dependencies). If the connection is migrating, always
          * prefer the partial migration target.
          */
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
             if (rsc->is_remote_node == FALSE) {
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                          rsc->id);
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
 
     /* now do the rest of the resources */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         if (rsc->is_remote_node == TRUE) {
             continue;
         }
         pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                      crm_element_name(rsc->xml), rsc->id);
         rsc->cmds->allocate(rsc, NULL, data_set);
     }
 }
 
 /* We always use pe_order_preserve with these convenience functions to exempt
  * internally generated constraints from the prohibition of user constraints
  * involving remote connection resources.
  *
  * The start ordering additionally uses pe_order_runnable_left so that the
  * specified action is not runnable if the start is not runnable.
  */
 
 static inline void
 order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
                         enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_rsc && rh_action && data_set) {
         custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
                             rh_action->rsc, NULL, rh_action,
                             pe_order_preserve | pe_order_runnable_left | extra,
                             data_set);
     }
 }
 
 static inline void
 order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
                        enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_action && rh_rsc && data_set) {
         custom_action_order(lh_action->rsc, NULL, lh_action,
                             rh_rsc, stop_key(rh_rsc), NULL,
                             pe_order_preserve | extra, data_set);
     }
 }
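 
 /* Illustrative usage (not part of this change; the names are assumptions):
  * order a remote connection's start before a dependent action, exempt from
  * the prohibition on user constraints involving remote connections.
  */
 #if 0
 order_start_then_action(remote_conn_rsc, dependent_action, pe_order_none,
                         data_set);
 #endif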
 
 // Clear fail counts for orphaned rsc on all online nodes
 static void
 cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (node->details->online
             && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                 data_set)) {
 
             pe_action_t *clear_op = NULL;
 
             clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                            data_set);
 
             /* We can't use order_action_then_stop() here because its
              * pe_order_preserve breaks things
              */
             custom_action_order(clear_op->rsc, NULL, clear_op,
                                 rsc, stop_key(rsc), NULL,
                                 pe_order_optional, data_set);
         }
     }
 }
 
 gboolean
 stage5(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int log_prio = show_utilization? LOG_STDOUT : LOG_TRACE;
 
     if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
         GListPtr nodes = g_list_copy(data_set->nodes);
 
         nodes = sort_nodes_by_weight(nodes, NULL, data_set);
         data_set->resources =
             g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
 
         g_list_free(nodes);
     }
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         dump_node_capacity(log_prio, "Original", node);
     }
 
     crm_trace("Allocating services");
     /* Take (next) highest resource, assign it and create its actions */
 
     allocate_resources(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         dump_node_capacity(log_prio, "Remaining", node);
     }
 
     // Process deferred action checks
     pe__foreach_param_check(data_set, check_params);
     pe__free_param_checks(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Calculating needed probes");
         /* This code probably needs optimization
          * ptest -x with 100 nodes, 100 clones and clone-max=100:
 
          With probes:
 
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
          ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          36s
          ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
 
          Without probes:
 
          ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
         */
 
         probe_resources(data_set);
     }
 
     crm_trace("Handle orphans");
     if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
             /* There's no need to recurse into rsc->children because those
              * should just be unallocated clone instances.
              */
             if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 cleanup_orphans(rsc, data_set);
             }
         }
     }
 
     crm_trace("Creating actions");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 
     crm_trace("Creating done");
     return TRUE;
 }
 
 static gboolean
 is_managed(const pe_resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return TRUE;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (is_managed(child_rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static gboolean
 any_managed_resources(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         if (is_managed(rsc)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Create pseudo-op for guest node fence, and order relative to it
  *
  * \param[in] node      Guest node to fence
  * \param[in] data_set  Working set of CIB state
  */
 static void
 fence_guest(pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_resource_t *container = node->details->remote_rsc->container;
     pe_action_t *stop = NULL;
     pe_action_t *stonith_op = NULL;
 
     /* The fence action is just a label; we don't do anything differently for
      * off vs. reboot. We specify it explicitly, rather than letting it default
      * to the cluster's default action, because we are not _initiating_ fencing
      * -- we are creating a pseudo-event to describe fencing that is already
      * occurring by other means (container recovery).
      */
     const char *fence_action = "off";
 
     /* Check whether guest's container resource has any explicit stop or
      * start (the stop may be implied by fencing of the guest's host).
      */
     if (container) {
         stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
 
         if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
             fence_action = "reboot";
         }
     }
 
     /* Create a fence pseudo-event, so we have an event to order actions
      * against, and the controller can always detect it.
      */
     stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, data_set);
     update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                         __func__, __LINE__);
 
     /* We want to imply stops/demotes after the guest is stopped, not wait until
      * it is restarted, so we always order pseudo-fencing after stop, not start
      * (even though start might be closer to what is done for a real reboot).
      */
     if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
         pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, data_set);
         crm_info("Implying guest node %s is down (action %d) after %s fencing",
                  node->details->uname, stonith_op->id, stop->node->details->uname);
         order_actions(parent_stonith_op, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
 
     } else if (stop) {
         order_actions(stop, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
         crm_info("Implying guest node %s is down (action %d) "
                  "after container %s is stopped (action %d)",
                  node->details->uname, stonith_op->id,
                  container->id, stop->id);
     } else {
         /* If we're fencing the guest node but there's no stop for the guest
          * resource, we must think the guest is already stopped. However, we may
          * think so because its resource history was just cleaned. To avoid
          * unnecessarily considering the guest node down if it's really up,
          * order the pseudo-fencing after any stop of the connection resource,
          * which will be ordered after any container (re-)probe.
          */
         stop = find_first_action(node->details->remote_rsc->actions, NULL,
                                  RSC_STOP, NULL);
 
         if (stop) {
             order_actions(stop, stonith_op, pe_order_optional);
             crm_info("Implying guest node %s is down (action %d) "
                      "after connection is stopped (action %d)",
                      node->details->uname, stonith_op->id, stop->id);
         } else {
             /* Not sure why we're fencing, but everything must already be
              * cleanly stopped.
              */
             crm_info("Implying guest node %s is down (action %d)",
                      node->details->uname, stonith_op->id);
         }
     }
 
     /* Order/imply other actions relative to pseudo-fence as with real fence */
     pcmk__order_vs_fence(stonith_op, data_set);
 }
 
 /*
  * Create dependencies for stonith and shutdown operations
  */
 gboolean
 stage6(pe_working_set_t * data_set)
 {
     pe_action_t *dc_down = NULL;
     pe_action_t *stonith_op = NULL;
     gboolean integrity_lost = FALSE;
     gboolean need_stonith = TRUE;
     GListPtr gIter;
     GListPtr stonith_ops = NULL;
     GList *shutdown_ops = NULL;
 
     /* Remote ordering constraints need to happen prior to calculating fencing
      * because it is one more place we will mark the node as dirty.
      *
      * A nice side effect of doing them early is that apply_*_ordering() can be
      * simpler because pe_fence_node() has already done some of the work.
      */
     crm_trace("Creating remote ordering constraints");
     apply_remote_node_ordering(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     if (any_managed_resources(data_set) == FALSE) {
         crm_notice("Delaying fencing operations until there are resources to manage");
         need_stonith = FALSE;
     }
 
     /* Check each node for stonith/shutdown */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (pe__is_guest_node(node)) {
             if (node->details->remote_requires_reset && need_stonith
                 && pe_can_fence(data_set, node)) {
                 fence_guest(node, data_set);
             }
             continue;
         }
 
         stonith_op = NULL;
 
         if (node->details->unclean
             && need_stonith && pe_can_fence(data_set, node)) {
 
             stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
             pe_warn("Scheduling Node %s for STONITH", node->details->uname);
 
             pcmk__order_vs_fence(stonith_op, data_set);
 
             if (node->details->is_dc) {
                 // Remember if the DC is being fenced
                 dc_down = stonith_op;
 
             } else {
 
                 if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
                     && (stonith_ops != NULL)) {
                     /* Concurrent fencing is disabled, so order each non-DC
                      * fencing in a chain. If there is any DC fencing or
                      * shutdown, it will be ordered after the last action in the
                      * chain later.
                      */
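                     /* For example (hypothetical scenario): if nodes A, B, and
                      * C must all be fenced, the chain built here is
                      * fence(A) -> fence(B) -> fence(C), and any DC fencing or
                      * shutdown is later ordered after fence(C).
                      */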
                     order_actions((pe_action_t *) stonith_ops->data,
                                   stonith_op, pe_order_optional);
                 }
 
                 // Remember all non-DC fencing actions in a separate list
                 stonith_ops = g_list_prepend(stonith_ops, stonith_op);
             }
 
         } else if (node->details->online && node->details->shutdown
                    /* TODO: Define what a shutdown op means for a remote node.
                     * For now, we do not send shutdown operations for remote
                     * nodes, but if we can come up with a good use for this in
                     * the future, we will.
                     */
                    && !pe__is_guest_or_remote_node(node)) {
 
             pe_action_t *down_op = sched_shutdown_op(node, data_set);
 
             if (node->details->is_dc) {
                 // Remember if the DC is being shut down
                 dc_down = down_op;
             } else {
                 // Remember non-DC shutdowns for later ordering
                 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
             }
         }
 
         if (node->details->unclean && stonith_op == NULL) {
             integrity_lost = TRUE;
             pe_warn("Node %s is unclean!", node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
             pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
 
         } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
             crm_notice("Cannot fence unclean nodes until quorum is"
                        " attained (or no-quorum-policy is set to ignore)");
         }
     }
 
     if (dc_down != NULL) {
         /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
          * DC elections. However, we don't want to order non-DC shutdowns before
          * a DC *fencing*, because even though we don't want a node that's
          * shutting down to become DC, the DC fencing could be ordered before a
          * clone stop that's also ordered before the shutdowns, thus leading to
          * a graph loop.
          */
         if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
             for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
                 pe_action_t *node_stop = (pe_action_t *) gIter->data;
 
                 crm_debug("Ordering shutdown on %s before %s on DC %s",
                           node_stop->node->details->uname,
                           dc_down->task, dc_down->node->details->uname);
 
                 order_actions(node_stop, dc_down, pe_order_optional);
             }
         }
 
         // Order any non-DC fencing before any DC fencing or shutdown
 
         if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
             /* With concurrent fencing, order each non-DC fencing action
              * separately before any DC fencing or shutdown.
              */
             for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
                 order_actions((pe_action_t *) gIter->data, dc_down,
                               pe_order_optional);
             }
         } else if (stonith_ops) {
             /* Without concurrent fencing, the non-DC fencing actions are
              * already ordered relative to each other, so we just need to order
              * the DC fencing after the last action in the chain (which is the
              * first item in the list).
              */
             order_actions((pe_action_t *) stonith_ops->data, dc_down,
                           pe_order_optional);
         }
     }
     g_list_free(stonith_ops);
     g_list_free(shutdown_ops);
     return TRUE;
 }
 
 /*
  * Determine the sets of independent actions and the correct order for the
  * actions in each set.
  *
  * Mark dependencies of un-runnable actions un-runnable.
  */
 static GListPtr
 find_actions_by_task(GListPtr actions, pe_resource_t * rsc, const char *original_key)
 {
     GListPtr list = NULL;
 
     list = find_actions(actions, original_key, NULL);
     if (list == NULL) {
         /* we're potentially searching a child of the original resource */
         char *key = NULL;
         char *task = NULL;
         guint interval_ms = 0;
 
         if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
             key = pcmk__op_key(rsc->id, task, interval_ms);
             list = find_actions(actions, key, NULL);
 
         } else {
             crm_err("Could not parse operation key %s", original_key);
         }
 
         free(key);
         free(task);
     }
 
     return list;
 }
 
 static void
 rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
                pe__ordering_t *order)
 {
     GListPtr gIter = NULL;
     GListPtr rh_actions = NULL;
     pe_action_t *rh_action = NULL;
     enum pe_ordering type;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(order != NULL, return);
 
     type = order->type;
     rh_action = order->rh_action;
     crm_trace("Processing RH of ordering constraint %d", order->id);
 
     if (rh_action != NULL) {
         rh_actions = g_list_prepend(NULL, rh_action);
 
     } else if (rsc != NULL) {
         rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
     }
 
     if (rh_actions == NULL) {
         pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
                      " ignoring", rsc->id, order->rh_action_task);
         if (lh_action) {
             pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
         }
         return;
     }
 
     if ((lh_action != NULL) && (lh_action->rsc == rsc)
         && pcmk_is_set(lh_action->flags, pe_action_dangle)) {
 
         pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
                      order->rh_action_task);
         pe__clear_order_flags(type, pe_order_implies_then);
     }
 
     gIter = rh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *rh_action_iter = (pe_action_t *) gIter->data;
 
         if (lh_action) {
             order_actions(lh_action, rh_action_iter, type);
 
         } else if (type & pe_order_implies_then) {
             update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear,
                                 __func__, __LINE__);
             crm_warn("Marking %s unrunnable: ordering 0x%.6x has no first action",
                      rh_action_iter->uuid, type);
         } else {
             crm_warn("Cannot apply ordering 0x%.6x to %s: no first action",
                      type, rh_action_iter->uuid);
         }
     }
 
     g_list_free(rh_actions);
 }
 
 static void
 rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
                 pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     GListPtr lh_actions = NULL;
     pe_action_t *lh_action = order->lh_action;
     pe_resource_t *rh_rsc = order->rh_rsc;
 
     crm_trace("Processing LH of ordering constraint %d", order->id);
     CRM_ASSERT(lh_rsc != NULL);
 
     if (lh_action != NULL) {
         lh_actions = g_list_prepend(NULL, lh_action);
 
     } else {
         lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
     }
 
     if (lh_actions == NULL && lh_rsc != rh_rsc) {
         char *key = NULL;
         char *op_type = NULL;
         guint interval_ms = 0;
 
         parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
         key = pcmk__op_key(lh_rsc->id, op_type, interval_ms);
 
         if ((lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED)
             && pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else if ((lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE)
                    && pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else {
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
             lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
             lh_actions = g_list_prepend(NULL, lh_action);
         }
 
         free(op_type);
     }
 
     gIter = lh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *lh_action_iter = (pe_action_t *) gIter->data;
 
         if (rh_rsc == NULL && order->rh_action) {
             rh_rsc = order->rh_action->rsc;
         }
         if (rh_rsc) {
             rsc_order_then(lh_action_iter, rh_rsc, order);
 
         } else if (order->rh_action) {
             order_actions(lh_action_iter, order->rh_action, order->type);
         }
     }
 
     g_list_free(lh_actions);
 }
 
 extern void update_colo_start_chain(pe_action_t *action,
                                     pe_working_set_t *data_set);
 
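 /*!
  * \internal
  * \brief Check whether an action is recurring
  *
  * \param[in] action  Action to check
  *
  * \return 1 if \p action has a nonzero interval, else 0
  */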
 static int
 is_recurring_action(pe_action_t *action)
 {
     guint interval_ms;
 
     if (pcmk__guint_from_hash(action->meta,
                               XML_LRM_ATTR_INTERVAL_MS, 0,
                               &interval_ms) != pcmk_rc_ok) {
         return 0;
     }
     return (interval_ms > 0);
 }
 
 static void
 apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
 {
     /* VMs are also classified as containers for these purposes, in that
      * both involve a 'thing' running on a real or remote cluster node.
      *
      * This allows us to be smarter about the type and extent of recovery
      * actions required in various scenarios.
      */
     pe_resource_t *remote_rsc = NULL;
     pe_resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     CRM_ASSERT(action->rsc);
     CRM_ASSERT(action->node);
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     container = remote_rsc->container;
     CRM_ASSERT(container);
 
     if (pcmk_is_set(container->flags, pe_rsc_failed)) {
         pe_fence_node(data_set, action->node, "container failed", FALSE);
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id,
               pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
               container->id);
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             /* Force resource recovery if the container is recovered */
             order_start_then_action(container, action, pe_order_implies_then,
                                     data_set);
 
             /* Wait for the connection resource to be up too */
             order_start_then_action(remote_rsc, action, pe_order_none,
                                     data_set);
             break;
 
         case stop_rsc:
         case action_demote:
             if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                 /* When the container representing a guest node fails, any stop
                  * or demote actions for resources running on the guest node
                  * are implied by the container stopping. This is similar to
                  * how fencing operations work for cluster nodes and remote
                  * nodes.
                  */
             } else {
                 /* Ensure the operation happens before the connection is brought
                  * down.
                  *
                  * If we really wanted to, we could order these after the
                  * connection start, IFF the container's current role was
                  * stopped (otherwise we re-introduce an ordering loop when the
                  * connection is restarting).
                  */
                 order_action_then_stop(action, remote_rsc, pe_order_none,
                                        data_set);
             }
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if only the
                  * connection was re-established.
                  */
                 if (task != no_action) {
                     order_start_then_action(remote_rsc, action,
                                             pe_order_implies_then, data_set);
                 }
             } else {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
     }
 }
 
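 /*!
  * \internal
  * \brief Determine the current state of a remote node's connection resource
  *
  * \param[in] node  Remote node to check
  *
  * \return State of the node's connection resource
  */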
 static enum remote_connection_state
 get_remote_node_state(pe_node_t *node)
 {
     pe_resource_t *remote_rsc = NULL;
     pe_node_t *cluster_node = NULL;
 
     CRM_ASSERT(node);
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node the remote connection resource resides on
      * is unclean or went offline, we can't process any operations
      * on that remote node until after it starts elsewhere.
      */
     if (remote_rsc->next_role == RSC_ROLE_STOPPED
         || remote_rsc->allocated_to == NULL) {
         /* The connection resource is not going to run anywhere */
 
         if (cluster_node && cluster_node->details->unclean) {
             /* The remote connection is failed because its resource is on a
              * failed node and can't be recovered elsewhere, so we must fence.
              */
             return remote_state_failed;
         }
 
         if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
             /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
         /* Connection resource is failed */
 
         if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
             && remote_rsc->remote_reconnect_ms
             && node->details->remote_was_fenced
             && !pe__shutdown_requested(node)) {
 
             /* We won't know whether the connection is recoverable until the
              * reconnect interval expires and we reattempt connection.
              */
             return remote_state_unknown;
         }
 
         /* The remote connection is in a failed state. If there are any
          * resources known to be active on it (stop) or in an unknown state
          * (probe), we must assume the worst and fence it.
          */
         return remote_state_failed;
 
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere, see if we can recover it first */
         return remote_state_unknown;
 
     } else if (cluster_node->details->unclean
                || !cluster_node->details->online) {
         /* Connection is running on a dead node, see if we can recover it first */
         return remote_state_resting;
 
     } else if (pcmk__list_of_multiple(remote_rsc->running_on)
                && remote_rsc->partial_migration_source
                && remote_rsc->partial_migration_target) {
         /* We're in the middle of migrating a connection resource,
          * wait until after the resource migrates before performing
          * any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
 
 /*!
  * \internal
  * \brief Order actions on remote node relative to actions for the connection
  */
 static void
 apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
 {
     pe_resource_t *remote_rsc = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     enum pe_ordering order_opts = pe_order_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(action->node);
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id, state2text(state));
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             order_opts = pe_order_none;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 pe__set_order_flags(order_opts, pe_order_implies_then);
             }
 
             /* Ensure connection is up before running this action */
             order_start_then_action(remote_rsc, action, order_opts, data_set);
             break;
 
         case stop_rsc:
             if (state == remote_state_alive) {
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);

             } else if (state == remote_state_failed) {
                 /* The resource is active on the node, but since we don't have
                  * a valid connection, the only way to stop the resource is by
                  * fencing the node. There is no need to order the stop
                  * relative to the remote connection, since the stop will
                  * become implied by the fencing.
                  */
                 pe_fence_node(data_set, action->node,
                               "resources are active and the connection is unrecoverable",
                               FALSE);

             } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                 /* State must be remote_state_unknown or remote_state_stopped.
                  * Since the connection is not coming back up in this
                  * transition, stop this resource first.
                  */
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);

             } else {
                 /* The connection is going to be started somewhere else, so
                  * stop this resource after that completes.
                  */
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
 
         case action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if (state == remote_state_resting
                 || state == remote_state_unknown) {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if only the
                  * connection was re-established.
                  */
                 order_start_then_action(remote_rsc, action,
                                         pe_order_implies_then, data_set);
 
             } else {
                 pe_node_t *cluster_node = pe__current_node(remote_rsc);
 
                 if (task == monitor_rsc && state == remote_state_failed) {
                     /* We would only be here if we do not know the state of
                      * the resource on the remote node. Since we have no way
                      * to find out, it is necessary to fence the node.
                      */
                     pe_fence_node(data_set, action->node,
                                   "resources are in an unknown state and the connection is unrecoverable",
                                   FALSE);
                 }

                 if (cluster_node && state == remote_state_stopped) {
                     /* The connection is currently up, but is going down
                      * permanently. Make sure we check that services are
                      * actually stopped _before_ we let the connection get
                      * closed.
                      */
                     order_action_then_stop(action, remote_rsc,
                                            pe_order_runnable_left, data_set);

                 } else {
                     order_start_then_action(remote_rsc, action, pe_order_none,
                                             data_set);
                 }
             }
             break;
     }
 }
 
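 /*!
  * \internal
  * \brief Order relevant resource actions relative to remote connections
  *
  * \param[in] data_set  Working set of CIB state
  */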
 static void
 apply_remote_node_ordering(pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         return;
     }
 
     for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
         pe_resource_t *remote = NULL;
 
         // We are only interested in resource actions
         if (action->rsc == NULL) {
             continue;
         }
 
         /* Special case: If we are clearing the failcount of an actual
          * remote connection resource, then make sure this happens before
          * any start of the resource in this transition.
          */
         if (action->rsc->is_remote_node &&
             pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
 
             custom_action_order(action->rsc, NULL, action,
                                 action->rsc,
                                 pcmk__op_key(action->rsc->id, RSC_START, 0),
                                 NULL, pe_order_optional, data_set);
 
             continue;
         }
 
         // We are only interested in actions allocated to a node
         if (action->node == NULL) {
             continue;
         }
 
         if (!pe__is_guest_or_remote_node(action->node)) {
             continue;
         }
 
         /* We are only interested in real actions.
          *
          * @TODO This is probably wrong; pseudo-actions might be converted to
          * real actions and vice versa later in update_actions() at the end of
          * stage7().
          */
         if (pcmk_is_set(action->flags, pe_action_pseudo)) {
             continue;
         }
 
         remote = action->node->details->remote_rsc;
         if (remote == NULL) {
             // Orphaned
             continue;
         }
 
         /* Another special case: if a resource is moving to a Pacemaker Remote
          * node, order the stop on the original node after any start of the
          * remote connection. This ensures that if the connection fails to
          * start, we leave the resource running on the original node.
          */
         if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
             for (GList *item = action->rsc->actions; item != NULL;
                  item = item->next) {
                 pe_action_t *rsc_action = item->data;
 
                 if ((rsc_action->node->details != action->node->details)
                     && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                     custom_action_order(remote, start_key(remote), NULL,
                                         action->rsc, NULL, rsc_action,
                                         pe_order_optional, data_set);
                 }
             }
         }
 
         /* The action occurs across a remote connection, so create
          * ordering constraints that guarantee the action occurs while the node
          * is active (after start, before stop ... things like that).
          *
          * This is somewhat brittle in that we need to make sure the results of
          * this ordering are compatible with the result of get_router_node().
          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
          * of this logic rather than action2xml().
          */
         if (remote->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action, data_set);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action, data_set);
         }
     }
 }
 
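 /*!
  * \internal
  * \brief Check whether ordering a probe before an action is unneeded
  *
  * \param[in] probe      Probe action to check
  * \param[in] rh_action  Action that would follow the probe
  *
  * \return TRUE if the ordering can be skipped, else FALSE
  */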
 static gboolean
 order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
 {
     /* No need to probe the resource on the node that is being unfenced.
      * Otherwise it might introduce a transition loop, since the probe would
      * be performed after the node is unfenced.
      */
     if (pcmk__str_eq(rh_action->task, CRM_OP_FENCE, pcmk__str_casei)
          && probe->node && rh_action->node
          && probe->node->details == rh_action->node->details) {
         const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
 
         if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
             return TRUE;
         }
     }
 
     // Shutdown waits for probe to complete only if it's on the same node
     if ((pcmk__str_eq(rh_action->task, CRM_OP_SHUTDOWN, pcmk__str_casei))
         && probe->node && rh_action->node
         && probe->node->details != rh_action->node->details) {
         return TRUE;
     }
     return FALSE;
 }
 
 static void
 order_first_probes_imply_stops(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
         pe__ordering_t *order = gIter->data;
         enum pe_ordering order_type = pe_order_optional;
 
         pe_resource_t *lh_rsc = order->lh_rsc;
         pe_resource_t *rh_rsc = order->rh_rsc;
         pe_action_t *lh_action = order->lh_action;
         pe_action_t *rh_action = order->rh_action;
         const char *lh_action_task = order->lh_action_task;
         const char *rh_action_task = order->rh_action_task;
 
         GListPtr probes = NULL;
         GListPtr rh_actions = NULL;
 
         GListPtr pIter = NULL;
 
         if (lh_rsc == NULL) {
             continue;
 
         } else if (rh_rsc && lh_rsc == rh_rsc) {
             continue;
         }
 
         if (lh_action == NULL && lh_action_task == NULL) {
             continue;
         }
 
         if (rh_action == NULL && rh_action_task == NULL) {
             continue;
         }
 
         /* Technically, a probe is expected to return "not running", which can
          * serve as an alternative to a stop action when the resource's status
          * is not yet known.
          */
         if (lh_action && !pcmk__str_eq(lh_action->task, RSC_STOP, pcmk__str_casei)) {
             continue;
 
         } else if (lh_action == NULL
                    && lh_action_task
                    && !pcmk__ends_with(lh_action_task, "_" RSC_STOP "_0")) {
             continue;
         }
 
         /* Do not probe the resource inside a stopping container. Otherwise it
          * might introduce a transition loop, since the probe would be
          * performed after the container starts again.
          */
         if (rh_rsc && lh_rsc->container == rh_rsc) {
             if (rh_action && pcmk__str_eq(rh_action->task, RSC_STOP, pcmk__str_casei)) {
                 continue;
 
             } else if (rh_action == NULL && rh_action_task
                        && pcmk__ends_with(rh_action_task, "_" RSC_STOP "_0")) {
                 continue;
             }
         }
 
         if (order->type == pe_order_none) {
             continue;
         }
 
         // Preserve the order options for future filtering
         if (pcmk_is_set(order->type, pe_order_apply_first_non_migratable)) {
             pe__set_order_flags(order_type,
                                 pe_order_apply_first_non_migratable);
         }
 
         if (pcmk_is_set(order->type, pe_order_same_node)) {
             pe__set_order_flags(order_type, pe_order_same_node);
         }
 
         // Keep the order types for future filtering
         if (order->type == pe_order_anti_colocation
             || order->type == pe_order_load) {
             order_type = order->type;
         }
 
         probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
         if (probes == NULL) {
             continue;
         }
 
         if (rh_action) {
             rh_actions = g_list_prepend(rh_actions, rh_action);
 
         } else if (rh_rsc && rh_action_task) {
             rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
         }
 
         if (rh_actions == NULL) {
             g_list_free(probes);
             continue;
         }
 
         crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
                   " (id=%d, type=%.6x)",
                   lh_action ? lh_action->uuid : lh_action_task,
                   rh_action ? rh_action->uuid : rh_action_task,
                   order->id, order->type);
 
         for (pIter = probes; pIter != NULL; pIter = pIter->next) {
             pe_action_t *probe = (pe_action_t *) pIter->data;
             GListPtr rIter = NULL;
 
             for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
                 pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;
 
                 if (order_first_probe_unneeded(probe, rh_action_iter)) {
                     continue;
                 }
                 order_actions(probe, rh_action_iter, order_type);
             }
         }
 
         g_list_free(rh_actions);
         g_list_free(probes);
     }
 }
 
 static void
 order_first_probe_then_restart_repromote(pe_action_t * probe,
                                          pe_action_t * after,
                                          pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     bool interleave = FALSE;
     pe_resource_t *compatible_rsc = NULL;
 
     if (probe == NULL
         || probe->rsc == NULL
         || probe->rsc->variant != pe_native) {
         return;
     }
 
     if (after == NULL
         // Avoid running into any possible loop
         || pcmk_is_set(after->flags, pe_action_tracking)) {
         return;
     }
 
     if (!pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
         return;
     }
 
     pe__set_action_flags(after, pe_action_tracking);
 
     crm_trace("Processing based on %s %s -> %s %s",
               probe->uuid,
               probe->node ? probe->node->details->uname: "",
               after->uuid,
               after->node ? after->node->details->uname : "");
 
     if (after->rsc
         /* It's better not to build a dependency directly with a clone/group;
          * instead, we proceed through the ordering chain and build
          * dependencies with its children.
          */
         && after->rsc->variant == pe_native
         && probe->rsc != after->rsc) {
 
             GListPtr then_actions = NULL;
             enum pe_ordering probe_order_type = pe_order_optional;
 
             if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
                 then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
 
             } else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
                 then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
             }
 
             for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
                 pe_action_t *then = (pe_action_t *) gIter->data;
 
                 // Skip any pseudo action which for example is implied by fencing
                 if (pcmk_is_set(then->flags, pe_action_pseudo)) {
                     continue;
                 }
 
                 order_actions(probe, then, probe_order_type);
             }
             g_list_free(then_actions);
     }
 
     if (after->rsc
         && after->rsc->variant > pe_group) {
         const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
                                                        XML_RSC_ATTR_INTERLEAVE);
 
         interleave = crm_is_true(interleave_s);
 
         if (interleave) {
             /* For an interleaved clone, we should build a dependency only
              * with the relevant clone child.
              */
             compatible_rsc = find_compatible_child(probe->rsc,
                                                    after->rsc,
                                                    RSC_ROLE_UNKNOWN,
                                                    FALSE, data_set);
         }
     }
 
     for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
         pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
         /* pe_order_implies_then is the reason why a required A.start
          * implies/enforces B.start to be required too, which is the cause of
          * B.restart/re-promote.
          *
          * We're not sure about pe_order_implies_then_on_node, though. It's
          * currently used only for the unfencing case, which tends to
          * introduce transition loops...
          */
 
         if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
             /* The order type between a group/clone and its child such as
              * B.start-> B_child.start is:
              * pe_order_implies_first_printed | pe_order_runnable_left
              *
              * Proceed through the ordering chain and build dependencies with
              * its children.
              */
             if (after->rsc == NULL
                 || after->rsc->variant < pe_group
                 || probe->rsc->parent == after->rsc
                 || after_wrapper->action->rsc == NULL
                 || after_wrapper->action->rsc->variant > pe_group
                 || after->rsc != after_wrapper->action->rsc->parent) {
                 continue;
             }
 
             /* Proceed to the children of a group or a non-interleaved clone.
              * For an interleaved clone, proceed only to the relevant child.
              */
             if (after->rsc->variant > pe_group
                 && interleave == TRUE
                 && (compatible_rsc == NULL
                     || compatible_rsc != after_wrapper->action->rsc)) {
                 continue;
             }
         }
 
         crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
                   after->uuid,
                   after->node ? after->node->details->uname: "",
                   after_wrapper->action->uuid,
                   after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
                   after_wrapper->type);
 
         order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
     }
 }
 
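 /* Clear the pe_action_tracking flag on all actions in the working set */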
 static void
 clear_actions_tracking_flag(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (pcmk_is_set(action->flags, pe_action_tracking)) {
             pe__clear_action_flags(action, pe_action_tracking);
         }
     }
 }
 
 static void
 order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     GListPtr probes = NULL;
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t * child = (pe_resource_t *) gIter->data;
 
         order_first_rsc_probes(child, data_set);
     }
 
     if (rsc->variant != pe_native) {
         return;
     }
 
     probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
 
     for (gIter = probes; gIter != NULL; gIter = gIter->next) {
         pe_action_t *probe = (pe_action_t *) gIter->data;
         GListPtr aIter = NULL;
 
         for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
             pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
 
             order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
             clear_actions_tracking_flag(data_set);
         }
     }
 
     g_list_free(probes);
 }
 
 static void
 order_first_probes(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         order_first_rsc_probes(rsc, data_set);
     }
 
     order_first_probes_imply_stops(data_set);
 }
 
 static void
 order_then_probes(pe_working_set_t * data_set)
 {
 #if 0
     GListPtr gIter = NULL;
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         /* Given "A then B", we would prefer to wait for A to be
          * started before probing B.
          *
          * If A was a filesystem on which the binaries and data for B
          * lived, it would have been useful if the author of B's agent
          * could assume that A is running before B.monitor will be
          * called.
          *
          * However we can't _only_ probe once A is running, otherwise
          * we'd not detect the state of B if A could not be started
          * for some reason.
          *
          * In practice however, we cannot even do an opportunistic
          * version of this because B may be moving:
          *
          *   B.probe -> B.start
          *   B.probe -> B.stop
          *   B.stop -> B.start
          *   A.stop -> A.start
          *   A.start -> B.probe
          *
          * So far so good, but if we add the result of this code:
          *
          *   B.stop -> A.stop
          *
          * Then we get a loop:
          *
          *   B.probe -> B.stop -> A.stop -> A.start -> B.probe
          *
          * We could kill the 'B.probe -> B.stop' dependency, but that
          * could mean stopping B too soon, because B.start must wait
          * for the probes to complete.
          *
          * Another option is to allow it only if A is a non-unique
          * clone with clone-max == node-max (since we'll never be
          * moving it).  However, we could still be stopping one
          * instance at the same time as starting another.
          *
          * The complexity of checking for allowed conditions combined
          * with the ever narrowing usecase suggests that this code
          * should remain disabled until someone gets smarter.
          */
         pe_action_t *start = NULL;
         GListPtr actions = NULL;
         GListPtr probes = NULL;
 
         actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
 
         if (actions) {
             start = actions->data;
             g_list_free(actions);
         }
 
         if (start == NULL) {
             crm_err("No start action for %s", rsc->id);
             continue;
         }
 
         probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
 
         for (actions = start->actions_before; actions != NULL; actions = actions->next) {
             pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
 
             GListPtr pIter = NULL;
             pe_action_t *first = before->action;
             pe_resource_t *first_rsc = first->rsc;
 
             if (first->required_runnable_before) {
                 GListPtr clone_actions = NULL;
                 for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
                     before = (pe_action_wrapper_t *) clone_actions->data;
 
                     crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
 
                     CRM_ASSERT(before->action->rsc);
                     first_rsc = before->action->rsc;
                     break;
                 }
 
             } else if (!pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
                 crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
             }
 
             if (first_rsc == NULL) {
                 continue;

             } else if (uber_parent(first_rsc) == uber_parent(start->rsc)) {
                 crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
                 continue;

             } else if (FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
                 crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
                 continue;
             }
 
             crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 pe_action_t *probe = (pe_action_t *) pIter->data;
 
                 crm_err("Ordering %s before %s", first->uuid, probe->uuid);
                 order_actions(first, probe, pe_order_optional);
             }
         }
     }
 #endif
 }
 
 static void
 order_probes(pe_working_set_t * data_set)
 {
     order_first_probes(data_set);
     order_then_probes(data_set);
 }
 
 gboolean
 stage7(pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     crm_trace("Applying ordering constraints");
 
     /* Don't ask me why, but apparently they need to be processed in
      * the order they were created in... go figure.
      *
      * Also, g_list_append() has horrendous performance characteristics, so
      * we need to use g_list_prepend() and then reverse the list here.
      */
     data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
 
     for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
         pe__ordering_t *order = gIter->data;
         pe_resource_t *rsc = order->lh_rsc;
 
         crm_trace("Applying ordering constraint: %d", order->id);
 
         if (rsc != NULL) {
             crm_trace("rsc_action-to-*");
             rsc_order_first(rsc, order, data_set);
             continue;
         }
 
         rsc = order->rh_rsc;
         if (rsc != NULL) {
             crm_trace("action-to-rsc_action");
             rsc_order_then(order->lh_action, rsc, order);
 
         } else {
             crm_trace("action-to-action");
             order_actions(order->lh_action, order->rh_action, order->type);
         }
     }
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         update_colo_start_chain(action, data_set);
     }
 
     crm_trace("Ordering probes");
     order_probes(data_set);
 
     crm_trace("Updating %d actions", g_list_length(data_set->actions));
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         update_action(action, data_set);
     }
 
     // Check for invalid orderings
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
         pe_action_wrapper_t *input = NULL;
 
         for (GList *input_iter = action->actions_before;
              input_iter != NULL; input_iter = input_iter->next) {
 
             input = (pe_action_wrapper_t *) input_iter->data;
             if (pcmk__ordering_is_invalid(action, input)) {
                 input->type = pe_order_none;
             }
         }
     }
 
     LogNodeActions(data_set, FALSE);
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         LogActions(rsc, data_set, FALSE);
     }
     return TRUE;
 }
 
 static int transition_id = -1;
 
 /*!
  * \internal
  * \brief Log a message after calculating a transition
  *
  * \param[in] filename  Where transition input is stored
  */
 void
 pcmk__log_transition_summary(const char *filename)
 {
     if (was_processing_error) {
         crm_err("Calculated transition %d (with errors), saving inputs in %s",
                 transition_id, filename);
 
     } else if (was_processing_warning) {
         crm_warn("Calculated transition %d (with warnings), saving inputs in %s",
                  transition_id, filename);
 
     } else {
         crm_notice("Calculated transition %d, saving inputs in %s",
                    transition_id, filename);
     }
     if (pcmk__config_error) {
         crm_notice("Configuration errors found during scheduler processing,"
                    " please run \"crm_verify -L\" to identify issues");
     }
 }
 
 /*
  * Create a dependency graph to send to the transitioner (via the controller)
  */
 gboolean
 stage8(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *value = NULL;
 
     transition_id++;
     crm_trace("Creating transition graph %d.", transition_id);
 
     data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
 
     value = pe_pref(data_set->config_hash, "cluster-delay");
     crm_xml_add(data_set->graph, "cluster-delay", value);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     crm_xml_add(data_set->graph, "stonith-timeout", value);
 
     crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
 
     if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
         crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(data_set->graph, "failed-start-offset", "1");
     }
 
     value = pe_pref(data_set->config_hash, "batch-limit");
     crm_xml_add(data_set->graph, "batch-limit", value);
 
     crm_xml_add_int(data_set->graph, "transition_id", transition_id);
 
     value = pe_pref(data_set->config_hash, "migration-limit");
     if (crm_parse_ll(value, NULL) > 0) {
         crm_xml_add(data_set->graph, "migration-limit", value);
     }
 
     if (data_set->recheck_by > 0) {
         char *recheck_epoch = NULL;
 
         recheck_epoch = crm_strdup_printf("%llu",
                                           (long long) data_set->recheck_by);
         crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
         free(recheck_epoch);
     }
 
 /* errors...
    slist_iter(action, pe_action_t, action_list, lpc,
    if(action->optional == FALSE && action->runnable == FALSE) {
    print_action("Ignoring", action, TRUE);
    }
    );
 */
 
     /* The following code will de-duplicate action inputs, so nothing past this
      * should rely on the action input type flags retaining their original
      * values.
      */
 
     gIter = data_set->resources;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
         rsc->cmds->expand(rsc, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created resource-driven action list");
 
     /* Add a pseudo-action to distribute the list of nodes with maintenance
      * state updates
      */
     add_maintenance_update(data_set);
 
     /* Catch any non-resource-specific actions */
     crm_trace("processing non-resource actions");
 
     gIter = data_set->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (action->rsc
             && action->node
             && action->node->details->shutdown
             && !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
             && !pcmk_any_flags_set(action->flags,
                                    pe_action_optional|pe_action_runnable)
             && pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)
             ) {
             /* Eventually we should just ignore the 'fence' case. But for now
              * it's the best way to detect (in CTS) when CIB resource updates
              * are being lost.
              */
             if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
                 || data_set->no_quorum_policy == no_quorum_ignore) {
                 crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
                          action->node->details->unclean ? "fence" : "shut down",
                          action->node->details->uname, action->rsc->id,
                          pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
                          pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
                          action->uuid);
             }
         }
 
         graph_element_from_action(action, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created generic action list");
     crm_trace("Created transition graph %d.", transition_id);
 
     return TRUE;
 }
 
 void
 LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         char *node_name = NULL;
         char *task = NULL;
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (action->rsc != NULL) {
             continue;
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue;
         }
 
         if (pe__is_guest_node(action->node)) {
             node_name = crm_strdup_printf("%s (resource: %s)",
                                           action->node->details->uname,
                                           action->node->details->remote_rsc->container->id);
         } else if (action->node) {
             node_name = crm_strdup_printf("%s", action->node->details->uname);
         }

         if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
             task = strdup("Shutdown");
         } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
             const char *op = g_hash_table_lookup(action->meta, "stonith_action");
             task = crm_strdup_printf("Fence (%s)", op);
         }
 
         if (task == NULL) {
             /* Nothing to report */
         } else if (terminal && action->reason) {
             printf(" * %s %s '%s'\n", task, node_name, action->reason);
         } else if (terminal) {
             printf(" * %s %s\n", task, node_name);
         } else if (action->reason) {
             crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
         } else {
             crm_notice(" * %s %s\n", task, node_name);
         }
 
         free(node_name);
         free(task);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index ac9219c7bc..4f41b70c6e 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1084 +1,1094 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include <lib/pengine/variant.h>
 
 static bool
 is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
 {
     for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (node->details == replica->node->details) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
 void distribute_children(pe_resource_t *rsc, GListPtr children, GListPtr nodes,
                          int max, int per_host_max, pe_working_set_t * data_set);
 
 static GList *
 get_container_list(pe_resource_t *rsc)
 {
     GList *containers = NULL;
 
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             containers = g_list_append(containers, replica->container);
         }
     }
     return containers;
 }
 
 static inline GList *
 get_containers_or_children(pe_resource_t *rsc)
 {
     return (rsc->variant == pe_container)?
            get_container_list(rsc) : rsc->children;
 }
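 
 /* Callers beware: for a bundle, the returned list is freshly allocated and
  * must be freed; for any other variant it is the resource's own child list.
  * Hence the idiom used throughout this file:
  *
  *     GList *children = get_containers_or_children(rsc);
  *     ...
  *     if (children != rsc->children) {
  *         g_list_free(children);
  *     }
  */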
 
 static bool
 migration_threshold_reached(pe_resource_t *rsc, pe_node_t *node,
                             pe_working_set_t *data_set)
 {
     int fail_count, countdown;
 
     /* Migration threshold of 0 means never force away */
     if (rsc->migration_threshold == 0) {
         return FALSE;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return FALSE;
     }
 
     /* If there are no failures, there's no need to force away */
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL,
                                   data_set);
     if (fail_count <= 0) {
         return FALSE;
     }
 
     /* How many more times recovery will be tried on this node */
     countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
 
     if (countdown == 0) {
         crm_warn("Forcing %s away from %s after %d failures (max=%d)",
                  rsc->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
         return TRUE;
     }
 
     crm_info("%s can fail %d more times on %s before being forced off",
              rsc->id, countdown, node->details->uname);
     return FALSE;
 }
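 
 /* Worked example (hypothetical values): a resource with migration-threshold=3
  * and 2 effective failures on a node gives countdown = QB_MAX(3 - 2, 0) = 1,
  * so it may fail once more there; a third failure makes the countdown 0 and
  * triggers the crm_warn() path above, forcing the resource away.
  */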
 
 pe_node_t *
 pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                       pe_working_set_t *data_set)
 {
     GListPtr containers = NULL;
     GListPtr nodes = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     containers = get_container_list(rsc);
 
     pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = sort_nodes_by_weight(nodes, NULL, data_set);
     containers = g_list_sort_with_data(containers, sort_clone_instance, data_set);
     distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
                         bundle_data->nreplicas_per_host, data_set);
     g_list_free(nodes);
     g_list_free(containers);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         pe_node_t *container_host = NULL;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
                          rsc->id, replica->ip->id);
             replica->ip->cmds->allocate(replica->ip, prefer, data_set);
         }
 
         container_host = replica->container->allocated_to;
         if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
             /* We need 'nested' connection resources to be on the same
              * host because pacemaker-remoted only supports a single
              * active connection
              */
             pcmk__new_colocation("child-remote-with-docker-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, data_set);
         }
 
         if (replica->remote) {
             pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
                          rsc->id, replica->remote->id);
             replica->remote->cmds->allocate(replica->remote, prefer,
                                             data_set);
         }
 
         // Explicitly allocate replicas' children before bundle child
         if (replica->child) {
             pe_node_t *node = NULL;
             GHashTableIter iter;
 
             g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
                 if (node->details != replica->node->details) {
                     node->weight = -INFINITY;
                 } else if (!migration_threshold_reached(replica->child, node,
                                                         data_set)) {
                     node->weight = INFINITY;
                 }
             }
 
             pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
             pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
                          rsc->id, replica->child->id);
             replica->child->cmds->allocate(replica->child, replica->node,
                                            data_set);
             pe__clear_resource_flags(replica->child->parent,
                                        pe_rsc_allocating);
         }
     }
 
     if (bundle_data->child) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
             if (is_bundle_node(bundle_data, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         pe_rsc_trace(rsc, "Allocating bundle %s child %s",
                      rsc->id, bundle_data->child->id);
         bundle_data->child->cmds->allocate(bundle_data->child, prefer, data_set);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
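 
 /* To summarize the allocation sequence above: the containers are distributed
  * across the weight-sorted node list first; then, for each replica, the IP,
  * the implicit remote-with-container colocation, the remote connection, and
  * the replica's child are allocated; finally the bundle's child itself is
  * confined to the chosen bundle nodes.
  */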
 
 
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
     GListPtr containers = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     containers = get_container_list(rsc);
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             replica->ip->cmds->create_actions(replica->ip, data_set);
         }
         if (replica->container) {
             replica->container->cmds->create_actions(replica->container,
                                                      data_set);
         }
         if (replica->remote) {
             replica->remote->cmds->create_actions(replica->remote, data_set);
         }
     }
 
     clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->create_actions(bundle_data->child, data_set);
 
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             /* promote */
             create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set);
             action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set);
             action->priority = INFINITY;
 
             /* demote */
             create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set);
             action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set);
             action->priority = INFINITY;
         }
     }
 
     g_list_free(containers);
 }
 
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc,
                                   pe_working_set_t *data_set)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         new_rsc_order(rsc, RSC_START, bundle_data->child, RSC_START,
                       pe_order_implies_first_printed, data_set);
         new_rsc_order(rsc, RSC_STOP, bundle_data->child, RSC_STOP,
                       pe_order_implies_first_printed, data_set);
 
         if (bundle_data->child->children) {
             new_rsc_order(bundle_data->child, RSC_STARTED, rsc, RSC_STARTED,
                           pe_order_implies_then_printed, data_set);
             new_rsc_order(bundle_data->child, RSC_STOPPED, rsc, RSC_STOPPED,
                           pe_order_implies_then_printed, data_set);
         } else {
             new_rsc_order(bundle_data->child, RSC_START, rsc, RSC_STARTED,
                           pe_order_implies_then_printed, data_set);
             new_rsc_order(bundle_data->child, RSC_STOP, rsc, RSC_STOPPED,
                           pe_order_implies_then_printed, data_set);
         }
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         CRM_ASSERT(replica->container);
 
         replica->container->cmds->internal_constraints(replica->container,
                                                        data_set);
 
         order_start_start(rsc, replica->container,
                           pe_order_runnable_left|pe_order_implies_first_printed);
 
         if (replica->child) {
             order_stop_stop(rsc, replica->child,
                             pe_order_implies_first_printed);
         }
         order_stop_stop(rsc, replica->container,
                         pe_order_implies_first_printed);
         new_rsc_order(replica->container, RSC_START, rsc, RSC_STARTED,
                       pe_order_implies_then_printed, data_set);
         new_rsc_order(replica->container, RSC_STOP, rsc, RSC_STOPPED,
                       pe_order_implies_then_printed, data_set);
 
         if (replica->ip) {
             replica->ip->cmds->internal_constraints(replica->ip, data_set);
 
             // Start ip then container
             new_rsc_order(replica->ip, RSC_START, replica->container, RSC_START,
                           pe_order_runnable_left|pe_order_preserve, data_set);
             new_rsc_order(replica->container, RSC_STOP, replica->ip, RSC_STOP,
                           pe_order_implies_first|pe_order_preserve, data_set);
 
             pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
                                  replica->container, NULL, NULL, data_set);
         }
 
         if (replica->remote) {
             /* This handles ordering and colocating remote relative to container
              * (via "resource-with-container"). Since IP is also ordered and
              * colocated relative to the container, we don't need to do anything
              * explicit here with IP.
              */
             replica->remote->cmds->internal_constraints(replica->remote,
                                                         data_set);
         }
 
         if (replica->child) {
             CRM_ASSERT(replica->remote);
 
             // "Start remote then child" is implicit in scheduler's remote logic
         }
 
     }
 
     if (bundle_data->child) {
         bundle_data->child->cmds->internal_constraints(bundle_data->child, data_set);
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             promote_demote_constraints(rsc, data_set);
 
             /* child demoted before global demoted */
             new_rsc_order(bundle_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED,
                           pe_order_implies_then_printed, data_set);
 
             /* global demote before child demote */
             new_rsc_order(rsc, RSC_DEMOTE, bundle_data->child, RSC_DEMOTE,
                           pe_order_implies_first_printed, data_set);
 
             /* child promoted before global promoted */
             new_rsc_order(bundle_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED,
                           pe_order_implies_then_printed, data_set);
 
             /* global promote before child promote */
             new_rsc_order(rsc, RSC_PROMOTE, bundle_data->child, RSC_PROMOTE,
                           pe_order_implies_first_printed, data_set);
         }
 
     } else {
         /* @TODO A restart-type ordering between the bundle's stop and start
          * was sketched here but never enabled:
          *
          * int type = pe_order_optional|pe_order_implies_then|pe_order_restart;
          *
          * custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
          *                     rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
          *                     pe_order_optional, data_set);
          */
     }
 }
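 
 /* The per-replica orderings above amount to (roughly, assuming an IP is
  * configured):
  *
  *   bundle:start -> container:start ; ip:start -> container:start
  *   container:start -> bundle:started
  *   bundle:stop -> container:stop -> ip:stop ; container:stop -> bundle:stopped
  *
  * The remote connection is ordered and colocated relative to its container
  * by its own internal constraints.
  */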
 
 static pe_resource_t *
 compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate,
                             pe_resource_t *rsc, enum rsc_role_e filter,
                             gboolean current)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(candidate != NULL, return NULL);
     get_bundle_variant_data(bundle_data, rsc);
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               rsc_lh->id, rsc->id, candidate->details->uname);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (is_child_compatible(replica->container, candidate, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       rsc_lh->id, replica->container->id,
                       candidate->details->uname);
             return replica->container;
         }
     }
 
     crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
     return NULL;
 }
 
 static pe_resource_t *
 compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc,
                    enum rsc_role_e filter, gboolean current,
                    pe_working_set_t *data_set)
 {
     GListPtr scratch = NULL;
     pe_resource_t *pair = NULL;
     pe_node_t *active_node_lh = NULL;
 
     active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
     if (active_node_lh) {
         return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
                                            current);
     }
 
     scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
     scratch = sort_nodes_by_weight(scratch, NULL, data_set);
 
     for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
   done:
     g_list_free(scratch);
     return pair;
 }
 
 void
 pcmk__bundle_rsc_colocation_lh(pe_resource_t *rsc, pe_resource_t *rsc_rh,
                                pcmk__colocation_t *constraint,
                                pe_working_set_t *data_set)
 {
     /* -- Never called --
      *
      * Instead we add the colocation constraints to the child and call from there
      */
     CRM_ASSERT(FALSE);
 }
 
 int
 copies_per_node(pe_resource_t *rsc)
 {
     /* Strictly speaking, there should be a 'copies_per_node' addition
      * to the resource function table, and each case would be a
      * function. However, that would be serious overkill just to return
      * an int. In fact, it seems to me that both function tables
      * could/should be replaced by resources.{c,h} full of
      * rsc_{some_operation} functions containing a switch as below
      * which calls out to functions named {variant}_{some_operation}
      * as needed.
      */
     switch(rsc->variant) {
         case pe_unknown:
             return 0;
         case pe_native:
         case pe_group:
             return 1;
         case pe_clone:
             {
                 const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
                 return crm_parse_int(max_clones_node, "1");
             }
         case pe_container:
             {
                 pe__bundle_variant_data_t *data = NULL;
                 get_bundle_variant_data(data, rsc);
                 return data->nreplicas_per_host;
             }
     }
     return 0;
 }
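 
 /* For example: a primitive or group always yields 1, a clone with
  * clone-node-max=2 yields 2, and a bundle with replicas-per-host=3 yields 3
  * (hypothetical configuration values).
  */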
 
 void
 pcmk__bundle_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc,
                                pcmk__colocation_t *constraint,
                                pe_working_set_t *data_set)
 {
     GListPtr allocated_rhs = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(constraint != NULL, return);
     CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return);
     CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return);
     CRM_ASSERT(rsc_lh->variant == pe_native);
 
     if (constraint->score == 0) {
         return;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         pe_rsc_trace(rsc, "%s is still provisional", rsc->id);
         return;
 
     } else if(constraint->rsc_lh->variant > pe_group) {
         pe_resource_t *rh_child = compatible_replica(rsc_lh, rsc,
                                                   RSC_ROLE_UNKNOWN, FALSE,
                                                   data_set);
 
         if (rh_child) {
             pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id);
             rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint,
                                             data_set);
 
         } else if (constraint->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
             assign_node(rsc_lh, NULL, TRUE);
 
         } else {
             pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
         }
 
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d",
                  constraint->id, rsc_lh->id, rsc->id, constraint->score);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (constraint->score < INFINITY) {
             replica->container->cmds->rsc_colocation_rh(rsc_lh,
                                                         replica->container,
                                                         constraint, data_set);
 
         } else {
             pe_node_t *chosen = replica->container->fns->location(replica->container,
                                                                   NULL, FALSE);
 
             if ((chosen == NULL)
                 || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
                 continue;
             }
             if ((constraint->role_rh >= RSC_ROLE_MASTER)
                 && (replica->child == NULL)) {
                 continue;
             }
             if ((constraint->role_rh >= RSC_ROLE_MASTER)
                 && (replica->child->next_role < RSC_ROLE_MASTER)) {
                 continue;
             }
 
             pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight);
             allocated_rhs = g_list_prepend(allocated_rhs, chosen);
         }
     }
 
     if (constraint->score >= INFINITY) {
         node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE);
     }
     g_list_free(allocated_rhs);
 }
 
 enum pe_action_flags
 pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node)
 {
     GListPtr containers = NULL;
     enum pe_action_flags flags = 0;
     pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, action->rsc);
     if(data->child) {
         enum action_tasks task = get_complex_task(data->child, action->task, TRUE);
         switch(task) {
             case no_action:
             case action_notify:
             case action_notified:
             case action_promote:
             case action_promoted:
             case action_demote:
             case action_demoted:
                 return summary_action_flags(action, data->child->children, node);
             default:
                 break;
         }
     }
 
     containers = get_container_list(action->rsc);
     flags = summary_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
 pe_resource_t *
 find_compatible_child_by_node(pe_resource_t * local_child, pe_node_t * local_node, pe_resource_t * rsc,
                               enum rsc_role_e filter, gboolean current)
 {
     GListPtr gIter = NULL;
     GListPtr children = NULL;
 
     if (local_node == NULL) {
         crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
         return NULL;
     }
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               local_child->id, rsc->id, local_node->details->uname);
 
     children = get_containers_or_children(rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if(is_child_compatible(child_rsc, local_node, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       local_child->id, child_rsc->id, local_node->details->uname);
             return child_rsc;
         }
     }
 
     crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
     if(children != rsc->children) {
         g_list_free(children);
     }
     return NULL;
 }
 
 static pe__bundle_replica_t *
 replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
                       pe_node_t *node)
 {
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->child
                 && (container == replica->container)
                 && (node->details == replica->node->details)) {
                 return replica;
             }
         }
     }
     return NULL;
 }
 
 static enum pe_graph_flags
 multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
                                 pe_node_t *node, enum pe_action_flags flags,
                                 enum pe_action_flags filter,
                                 enum pe_ordering type,
                                 pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     GListPtr children = NULL;
     gboolean current = FALSE;
     enum pe_graph_flags changed = pe_graph_none;
 
     /* FIXME: detecting whether 'first' refers to the current (stopping or
      * demoting) instances by its UUID suffix is lazy
      */
     if (pcmk__ends_with(first->uuid, "_stopped_0")
         || pcmk__ends_with(first->uuid, "_demoted_0")) {
         current = TRUE;
     }
 
     children = get_containers_or_children(then->rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *then_child = gIter->data;
         pe_resource_t *first_child = find_compatible_child(then_child,
                                                            first->rsc,
                                                            RSC_ROLE_UNKNOWN,
                                                            current, data_set);
         if (first_child == NULL && current) {
             crm_trace("Ignoring %s (no active instance to order against)",
                       then_child->id);
 
         } else if (first_child == NULL) {
             crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);
 
             /* This hack is ugly, but what else can we do?
              *
              * If nothing is active, or about to be active, on the same
              * node as then_child, then then_child must not be allowed
              * to start.
              */
             if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
                 pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                 if(assign_node(then_child, NULL, TRUE)) {
                     pe__set_graph_flags(changed, first, pe_graph_updated_then);
                 }
             }
 
         } else {
             pe_action_t *first_action = NULL;
             pe_action_t *then_action = NULL;
 
             enum action_tasks task = clone_child_action(first);
             const char *first_task = task2text(task);
 
             pe__bundle_replica_t *first_replica = NULL;
             pe__bundle_replica_t *then_replica = NULL;
 
             first_replica = replica_for_container(first->rsc, first_child,
                                                   node);
             if (strstr(first->task, "stop") && first_replica && first_replica->child) {
                 /* Except for 'stopped', we should be looking at the
                  * in-container resource; actions for the child will
                  * happen later and are therefore more likely to align
                  * with the user's intent.
                  */
                 first_action = find_first_action(first_replica->child->actions,
                                                  NULL, task2text(task), node);
             } else {
                 first_action = find_first_action(first_child->actions, NULL, task2text(task), node);
             }
 
             then_replica = replica_for_container(then->rsc, then_child, node);
             if (strstr(then->task, "mote")
                 && then_replica && then_replica->child) {
                 /* Promote/demote actions will never be found for the
                  * container resource, look in the child instead
                  *
                  * Alternatively treat:
                  *  'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
                  *  'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
                  */
                 then_action = find_first_action(then_replica->child->actions,
                                                 NULL, then->task, node);
             } else {
                 then_action = find_first_action(then_child->actions, NULL, then->task, node);
             }
 
             if (first_action == NULL) {
                 if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (first)",
                             first_task, first_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (first)",
                               first_task, first_child->id,
                               pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             /* We're only interested if 'then' is neither stopping nor being demoted */
             if (then_action == NULL) {
                 if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (then)",
                             then->task, then_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (then)",
                               then->task, then_child->id,
                               pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             if (order_actions(first_action, then_action, type)) {
                 crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
                           first_action->uuid,
                           pcmk_is_set(first_action->flags, pe_action_optional),
                           then_action->uuid,
                           pcmk_is_set(then_action->flags, pe_action_optional),
                           type);
                 pe__set_graph_flags(changed, first,
                                     pe_graph_updated_first|pe_graph_updated_then);
             }
             if(first_action && then_action) {
                 changed |= then_child->cmds->update_actions(first_action,
                     then_action, node,
                     first_child->cmds->action_flags(first_action, node),
                     filter, type, data_set);
             } else {
                 crm_err("Nothing found either for %s (%p) or %s (%p) %s",
                         first_child->id, first_action,
                         then_child->id, then_action, task2text(task));
             }
         }
     }
 
     if(children != then->rsc->children) {
         g_list_free(children);
     }
     return changed;
 }
 
 static bool
 can_interleave_actions(pe_action_t *first, pe_action_t *then)
 {
     bool interleave = FALSE;
     pe_resource_t *rsc = NULL;
     const char *interleave_s = NULL;
 
     if(first->rsc == NULL || then->rsc == NULL) {
         crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
         crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid);
         return FALSE;
     }
 
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
     interleave = crm_is_true(interleave_s);
     crm_trace("Interleave %s -> %s: %s (based on %s)",
               first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
 
     return interleave;
 }
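 
 /* Interleaving is enabled by the interleave meta-attribute on the clone or
  * bundle, e.g. (hypothetical IDs):
  *
  *   <clone id="c1">
  *     <meta_attributes id="c1-meta">
  *       <nvpair id="c1-interleave" name="interleave" value="true"/>
  *     </meta_attributes>
  *     ...
  *   </clone>
  *
  * Note that when 'then' is a stop or demote, the attribute is read from the
  * 'first' resource instead of 'then'.
  */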
 
 enum pe_graph_flags
 pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
                            pe_node_t *node, enum pe_action_flags flags,
                            enum pe_action_flags filter, enum pe_ordering type,
                            pe_working_set_t *data_set)
 {
     enum pe_graph_flags changed = pe_graph_none;
 
     crm_trace("%s -> %s", first->uuid, then->uuid);
 
     if(can_interleave_actions(first, then)) {
         changed = multi_update_interleave_actions(first, then, node, flags,
                                                   filter, type, data_set);
 
     } else if(then->rsc) {
         GListPtr gIter = NULL;
         GListPtr children = NULL;
 
         // Handle the 'primitive' ordering case
         changed |= native_update_actions(first, then, node, flags, filter,
                                          type, data_set);
 
         // Now any children (or containers in the case of a bundle)
         children = get_containers_or_children(then->rsc);
         for (gIter = children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *then_child = (pe_resource_t *) gIter->data;
             enum pe_graph_flags then_child_changed = pe_graph_none;
             pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
 
             if (then_child_action) {
                 enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node);
 
                 if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
                     then_child_changed |= then_child->cmds->update_actions(first,
                         then_child_action, node, flags, filter, type, data_set);
                 }
                 changed |= then_child_changed;
                 if (then_child_changed & pe_graph_updated_then) {
                     for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
                         pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
                         update_action(next->action, data_set);
                     }
                 }
             }
         }
 
         if(children != then->rsc->children) {
             g_list_free(children);
         }
     }
     return changed;
 }
 
 void
 pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     get_bundle_variant_data(bundle_data, rsc);
 
     native_rsc_location(rsc, constraint);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->container) {
             replica->container->cmds->rsc_location(replica->container,
                                                    constraint);
         }
         if (replica->ip) {
             replica->ip->cmds->rsc_location(replica->ip, constraint);
         }
     }
 
     if (bundle_data->child
         && ((constraint->role_filter == RSC_ROLE_SLAVE)
             || (constraint->role_filter == RSC_ROLE_MASTER))) {
         bundle_data->child->cmds->rsc_location(bundle_data->child, constraint);
         bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
                                                           constraint);
     }
 }
 
 void
 pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->expand(bundle_data->child, data_set);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->remote && replica->container
-            && pe__bundle_needs_remote_name(replica->remote)) {
+            && pe__bundle_needs_remote_name(replica->remote, data_set)) {
 
             /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
              * run pacemaker-remoted inside, without needing a separate IP for
              * the container. This is done by configuring the inner remote's
              * connection host as the magic string "#uname", then
              * replacing it with the underlying host when needed.
              */
             xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
                                                replica->remote->xml, LOG_ERR);
             const char *calculated_addr = NULL;
 
+            // Replace the value in replica->remote->xml (if appropriate)
             calculated_addr = pe__add_bundle_remote_name(replica->remote,
+                                                         data_set,
                                                          nvpair, "value");
             if (calculated_addr) {
+                /* Since this is for the bundle as a resource, and not any
+                 * particular action, replace the value in the default
+                 * parameters (not evaluated for node). action2xml() will grab
+                 * it from there to replace it in node-evaluated parameters.
+                 */
+                GHashTable *params = pe_rsc_params(replica->remote,
+                                                   NULL, data_set);
+
                 crm_trace("Set address for bundle connection %s to bundle host %s",
                           replica->remote->id, calculated_addr);
-                g_hash_table_replace(replica->remote->parameters,
+                g_hash_table_replace(params,
                                      strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                      strdup(calculated_addr));
             } else {
                 /* The only way to get here is if the remote connection is
                  * neither currently running nor scheduled to run. That means we
                  * won't be doing any operations that require addr (only start
                  * requires it; we additionally use it to compare digests when
                  * unpacking status, promote, and migrate_from history, but
                  * that's already happened by this point).
                  */
                 crm_info("Unable to determine address for bundle %s remote connection",
                          rsc->id);
             }
         }
         if (replica->ip) {
             replica->ip->cmds->expand(replica->ip, data_set);
         }
         if (replica->container) {
             replica->container->cmds->expand(replica->container, data_set);
         }
         if (replica->remote) {
             replica->remote->cmds->expand(replica->remote, data_set);
         }
     }
 }
 
 gboolean
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node,
                           pe_action_t *complete, gboolean force,
                           pe_working_set_t * data_set)
 {
     bool any_created = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return FALSE);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             any_created |= replica->ip->cmds->create_probe(replica->ip, node,
                                                            complete, force,
                                                            data_set);
         }
         if (replica->child && (node->details == replica->node->details)) {
             any_created |= replica->child->cmds->create_probe(replica->child,
                                                               node, complete,
                                                               force, data_set);
         }
         if (replica->container) {
             bool created = replica->container->cmds->create_probe(replica->container,
                                                                   node, complete,
                                                                   force, data_set);
 
             if(created) {
                 any_created = TRUE;
                 /* If we're limited to one replica per host (probably due to
                  * the lack of an IP range), then we don't want any of our
                  * peer containers starting until we've established that no
                  * other copies are already running.
                  *
                  * Partly this is to ensure that nreplicas_per_host is
                  * observed, but also to ensure that the containers don't
                  * fail to start because the necessary port mappings (which
                  * won't include an IP for uniqueness) are already taken.
                  */
 
                 for (GList *tIter = bundle_data->replicas;
                      tIter && (bundle_data->nreplicas_per_host == 1);
                      tIter = tIter->next) {
                     pe__bundle_replica_t *other = tIter->data;
 
                     if ((other != replica) && (other != NULL)
                         && (other->container != NULL)) {
 
                         custom_action_order(replica->container,
                                             pcmk__op_key(replica->container->id, RSC_STATUS, 0),
                                             NULL, other->container,
                                             pcmk__op_key(other->container->id, RSC_START, 0),
                                             NULL,
                                             pe_order_optional|pe_order_same_node,
                                             data_set);
                     }
                 }
             }
         }
         if (replica->container && replica->remote
             && replica->remote->cmds->create_probe(replica->remote, node,
                                                    complete, force,
                                                    data_set)) {
 
             /* Do not probe the remote resource until we know where the
              * container is running. This is required for REMOTE_CONTAINER_HACK
              * to correctly probe remote resources.
              */
             char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0);
             pe_action_t *probe = find_first_action(replica->remote->actions,
                                                    probe_uuid, NULL, node);
 
             free(probe_uuid);
             if (probe) {
                 any_created = TRUE;
                 crm_trace("Ordering %s probe on %s",
                           replica->remote->id, node->details->uname);
                 custom_action_order(replica->container,
                                     pcmk__op_key(replica->container->id, RSC_START, 0),
                                     NULL, replica->remote, NULL, probe,
                                     pe_order_probe, data_set);
             }
         }
     }
     return any_created;
 }
 
 void
 pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
 {
 }
 
 void
 pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set,
                          gboolean terminal)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             LogActions(replica->ip, data_set, terminal);
         }
         if (replica->container) {
             LogActions(replica->container, data_set, terminal);
         }
         if (replica->remote) {
             LogActions(replica->remote, data_set, terminal);
         }
         if (replica->child) {
             LogActions(replica->child, data_set, terminal);
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c
index 0029ad742a..92b9740f3d 100644
--- a/lib/pacemaker/pcmk_sched_constraints.c
+++ b/lib/pacemaker/pcmk_sched_constraints.c
@@ -1,3065 +1,3067 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <regex.h>
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/iso8601.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/pengine/rules.h>
 #include <pacemaker-internal.h>
 
 enum pe_order_kind {
     pe_order_kind_optional,
     pe_order_kind_mandatory,
     pe_order_kind_serialize,
 };
 
 #define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do {				\
 	__rsc = pe_find_constraint_resource(data_set->resources, __name);		\
 	if(__rsc == NULL) {						\
 	    pcmk__config_err("%s: No resource found for %s", __set, __name);    \
 	    return FALSE;						\
 	}								\
     } while(0)
 
 enum pe_ordering get_flags(const char *id, enum pe_order_kind kind,
                            const char *action_first, const char *action_then, gboolean invert);
 enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind);
 static pe__location_t *generate_location_rule(pe_resource_t *rsc,
                                               xmlNode *rule_xml,
                                               const char *discovery,
                                               crm_time_t *next_change,
                                               pe_working_set_t *data_set,
-                                              pe_match_data_t *match_data);
+                                              pe_re_match_data_t *match_data);
 static void unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 static void unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 static bool
 evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set)
 {
     bool result = FALSE;
     crm_time_t *next_change = crm_time_new_undefined();
 
     result = pe_evaluate_rules(lifetime, NULL, data_set->now, next_change);
     if (crm_time_is_defined(next_change)) {
         time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
         pe__update_recheck_time(recheck, data_set);
     }
     crm_time_free(next_change);
     return result;
 }
 
 gboolean
 unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
     xmlNode *lifetime = NULL;
 
     for (xml_obj = pcmk__xe_first_child(xml_constraints); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
         const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
         const char *tag = crm_element_name(xml_obj);
 
         if (id == NULL) {
             pcmk__config_err("Ignoring <%s> constraint without "
                              XML_ATTR_ID, tag);
             continue;
         }
 
         crm_trace("Processing constraint %s %s", tag, id);
 
         lifetime = first_named_child(xml_obj, "lifetime");
         if (lifetime) {
             pcmk__config_warn("Support for 'lifetime' attribute (in %s) is "
                               "deprecated (the rules it contains should "
                               "instead be direct descendants of the "
                               "constraint object)", id);
         }
 
         if (lifetime && !evaluate_lifetime(lifetime, data_set)) {
             crm_info("Constraint %s %s is not active", tag, id);
 
         } else if (pcmk__str_eq(XML_CONS_TAG_RSC_ORDER, tag, pcmk__str_casei)) {
             unpack_rsc_order(xml_obj, data_set);
 
         } else if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, tag, pcmk__str_casei)) {
             unpack_rsc_colocation(xml_obj, data_set);
 
         } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, tag, pcmk__str_casei)) {
             unpack_location(xml_obj, data_set);
 
         } else if (pcmk__str_eq(XML_CONS_TAG_RSC_TICKET, tag, pcmk__str_casei)) {
             unpack_rsc_ticket(xml_obj, data_set);
 
         } else {
             pe_err("Unsupported constraint type: %s", tag);
         }
     }
 
     return TRUE;
 }
 
 static const char *
 invert_action(const char *action)
 {
     if (pcmk__str_eq(action, RSC_START, pcmk__str_casei)) {
         return RSC_STOP;
 
     } else if (pcmk__str_eq(action, RSC_STOP, pcmk__str_casei)) {
         return RSC_START;
 
     } else if (pcmk__str_eq(action, RSC_PROMOTE, pcmk__str_casei)) {
         return RSC_DEMOTE;
 
     } else if (pcmk__str_eq(action, RSC_DEMOTE, pcmk__str_casei)) {
         return RSC_PROMOTE;
 
     } else if (pcmk__str_eq(action, RSC_PROMOTED, pcmk__str_casei)) {
         return RSC_DEMOTED;
 
     } else if (pcmk__str_eq(action, RSC_DEMOTED, pcmk__str_casei)) {
         return RSC_PROMOTED;
 
     } else if (pcmk__str_eq(action, RSC_STARTED, pcmk__str_casei)) {
         return RSC_STOPPED;
 
     } else if (pcmk__str_eq(action, RSC_STOPPED, pcmk__str_casei)) {
         return RSC_STARTED;
     }
     crm_warn("Unknown action '%s' specified in order constraint", action);
     return NULL;
 }
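 
 /* This inversion is what implements symmetrical orderings: e.g. a constraint
  * "start A then start B" implies the inverse "stop B then stop A" whenever
  * symmetrical is true (the default for non-serialized orderings).
  */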
 
 static enum pe_order_kind
 get_ordering_type(xmlNode * xml_obj)
 {
     enum pe_order_kind kind_e = pe_order_kind_mandatory;
     const char *kind = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND);
 
     if (kind == NULL) {
         const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
 
         kind_e = pe_order_kind_mandatory;
 
         if (score) {
             // @COMPAT deprecated informally since 1.0.7, formally since 2.0.1
             int score_i = char2score(score);
 
             if (score_i == 0) {
                 kind_e = pe_order_kind_optional;
             }
             pe_warn_once(pe_wo_order_score,
                          "Support for 'score' in rsc_order is deprecated "
                          "and will be removed in a future release (use 'kind' instead)");
         }
 
     } else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_casei)) {
         kind_e = pe_order_kind_mandatory;
 
     } else if (pcmk__str_eq(kind, "Optional", pcmk__str_casei)) {
         kind_e = pe_order_kind_optional;
 
     } else if (pcmk__str_eq(kind, "Serialize", pcmk__str_casei)) {
         kind_e = pe_order_kind_serialize;
 
     } else {
         pcmk__config_err("Resetting '" XML_ORDER_ATTR_KIND "' for constraint "
                          "'%s' to Mandatory because '%s' is not valid",
                          crm_str(ID(xml_obj)), kind);
     }
     return kind_e;
 }
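 
 /* Example of the preferred syntax (hypothetical IDs):
  *
  *   <rsc_order id="order-1" first="A" then="B" kind="Optional"/>
  *
  * versus the deprecated score-based form, where score="0" meant Optional and
  * anything else meant Mandatory:
  *
  *   <rsc_order id="order-1" first="A" then="B" score="0"/>
  */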
 
 static pe_resource_t *
 pe_find_constraint_resource(GListPtr rsc_list, const char *id)
 {
     GListPtr rIter = NULL;
 
     for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
         pe_resource_t *parent = rIter->data;
         pe_resource_t *match = parent->fns->find_rsc(parent, id, NULL,
                                                      pe_find_renamed);
 
         if (match != NULL) {
             if(!pcmk__str_eq(match->id, id, pcmk__str_casei)) {
                 /* We found an instance of a clone instead */
                 match = uber_parent(match);
                 crm_debug("Found %s for %s", match->id, id);
             }
             return match;
         }
     }
     crm_trace("No match for %s", id);
     return NULL;
 }
 
 static gboolean
 pe_find_constraint_tag(pe_working_set_t * data_set, const char * id, pe_tag_t ** tag)
 {
     gboolean rc = FALSE;
 
     *tag = NULL;
     rc = g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
                                        NULL, (gpointer*) tag);
 
     if (rc == FALSE) {
         rc = g_hash_table_lookup_extended(data_set->tags, id,
                                           NULL, (gpointer*) tag);
 
         if (rc == FALSE) {
             crm_warn("No template or tag named '%s'", id);
             return FALSE;
 
         } else if (*tag == NULL) {
             crm_warn("No resource is tagged with '%s'", id);
             return FALSE;
         }
 
     } else if (*tag == NULL) {
         crm_warn("No resource is derived from template '%s'", id);
         return FALSE;
     }
 
     return rc;
 }
 
 static gboolean
 valid_resource_or_tag(pe_working_set_t * data_set, const char * id,
                       pe_resource_t ** rsc, pe_tag_t ** tag)
 {
     gboolean rc = FALSE;
 
     if (rsc) {
         *rsc = NULL;
         *rsc = pe_find_constraint_resource(data_set->resources, id);
         if (*rsc) {
             return TRUE;
         }
     }
 
     if (tag) {
         *tag = NULL;
         rc = pe_find_constraint_tag(data_set, id, tag);
     }
 
     return rc;
 }
 
 static gboolean
 order_is_symmetrical(xmlNode * xml_obj,
                      enum pe_order_kind parent_kind, const char * parent_symmetrical_s)
 {
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *kind_s = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND);
     const char *score_s = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
     const char *symmetrical_s = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL);
     enum pe_order_kind kind = parent_kind;
 
     if (kind_s || score_s) {
         kind = get_ordering_type(xml_obj);
     }
 
     if (symmetrical_s == NULL) {
         symmetrical_s = parent_symmetrical_s;
     }
 
     if (symmetrical_s) {
         gboolean symmetrical = crm_is_true(symmetrical_s);
 
         if (symmetrical && kind == pe_order_kind_serialize) {
             pcmk__config_warn("Ignoring " XML_CONS_ATTR_SYMMETRICAL
                               " for '%s' because not valid with "
                               XML_ORDER_ATTR_KIND " of 'Serialize'", id);
             return FALSE;
         }
 
         return symmetrical;
 
     } else {
         if (kind == pe_order_kind_serialize) {
             return FALSE;
 
         } else {
             return TRUE;
         }
     }
 }
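 
 /* In short: symmetrical defaults to true except for kind=Serialize, and
  * symmetrical=true is rejected for Serialize orderings. An asymmetrical
  * ordering looks like (hypothetical IDs):
  *
  *   <rsc_order id="order-1" first="A" then="B" symmetrical="false"/>
  */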
 
 static gboolean
 unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     int order_id = 0;
     pe_resource_t *rsc_then = NULL;
     pe_resource_t *rsc_first = NULL;
     gboolean invert_bool = TRUE;
     int min_required_before = 0;
     enum pe_order_kind kind = pe_order_kind_mandatory;
     enum pe_ordering cons_weight = pe_order_optional;
 
     const char *id_first = NULL;
     const char *id_then = NULL;
     const char *action_then = NULL;
     const char *action_first = NULL;
     const char *instance_then = NULL;
     const char *instance_first = NULL;
 
     const char *id = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = crm_element_value(xml_obj, XML_ATTR_ID);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     invert_bool = order_is_symmetrical(xml_obj, kind, NULL);
 
     id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN);
     id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST);
 
     action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
     action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
 
     instance_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_INSTANCE);
     instance_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_INSTANCE);
 
     if (action_first == NULL) {
         action_first = RSC_START;
     }
     if (action_then == NULL) {
         action_then = action_first;
     }
 
     if (id_first == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without "
                          XML_ORDER_ATTR_FIRST, id);
         return FALSE;
     }
     if (id_then == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without "
                          XML_ORDER_ATTR_THEN, id);
         return FALSE;
     }
 
     rsc_then = pe_find_constraint_resource(data_set->resources, id_then);
     rsc_first = pe_find_constraint_resource(data_set->resources, id_first);
 
     if (rsc_then == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, id_then);
         return FALSE;
 
     } else if (rsc_first == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, id_first);
         return FALSE;
 
     } else if (instance_then && pe_rsc_is_clone(rsc_then) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, id_then, instance_then);
         return FALSE;
 
     } else if (instance_first && pe_rsc_is_clone(rsc_first) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, id_first, instance_first);
         return FALSE;
     }
 
     if (instance_then) {
         rsc_then = find_clone_instance(rsc_then, instance_then, data_set);
         if (rsc_then == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               id, id_then, instance_then);
             return FALSE;
         }
     }
 
     if (instance_first) {
         rsc_first = find_clone_instance(rsc_first, instance_first, data_set);
         if (rsc_first == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               "'%s'", id, id_first, instance_first);
             return FALSE;
         }
     }
 
     cons_weight = pe_order_optional;
     kind = get_ordering_type(xml_obj);
 
     if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) {
         crm_trace("Upgrade : recovery - implies right");
         pe__set_order_flags(cons_weight, pe_order_implies_then);
     }
 
     if (invert_bool == FALSE) {
         pe__set_order_flags(cons_weight, get_asymmetrical_flags(kind));
     } else {
         pe__set_order_flags(cons_weight,
                               get_flags(id, kind, action_first, action_then, FALSE));
     }
 
     if (pe_rsc_is_clone(rsc_first)) {
         /* If clone-min is set, require at least that number of instances to be
          * runnable before allowing dependencies to be runnable.
          */
         const char *min_clones_s = g_hash_table_lookup(rsc_first->meta,
                                                        XML_RSC_ATTR_INCARNATION_MIN);
 
         // @COMPAT 1.1.13: deprecated
         const char *require_all_s = crm_element_value(xml_obj, "require-all");
 
         if (min_clones_s) {
             min_required_before = crm_parse_int(min_clones_s, "0");
 
         } else if (require_all_s) {
             pe_warn_once(pe_wo_require_all,
                         "Support for require-all in ordering constraints "
                         "is deprecated and will be removed in a future release"
                         " (use clone-min clone meta-attribute instead)");
             if (crm_is_true(require_all_s) == FALSE) {
                 // require-all=false is deprecated equivalent of clone-min=1
                 min_required_before = 1;
             }
         }
     }
 
    /* If there is a minimum number of instances that must be runnable before
     * the 'then' action is runnable, we use a pseudo action as an intermediate
     * step: start min number of clones -> pseudo action is runnable ->
     * dependency is runnable. */
     if (min_required_before) {
         GListPtr rIter = NULL;
         char *task = crm_strdup_printf(CRM_OP_RELAXED_CLONE ":%s", id);
         pe_action_t *unordered_action = get_pseudo_op(task, data_set);
         free(task);
 
        /* Require at least "min_required_before" of the instance actions to
         * be runnable before allowing the pseudo action itself to be
         * considered runnable. */
         unordered_action->required_runnable_before = min_required_before;
         update_action_flags(unordered_action, pe_action_requires_any,
                             __func__, __LINE__);
 
         for (rIter = rsc_first->children; id && rIter; rIter = rIter->next) {
             pe_resource_t *child = rIter->data;
             /* order each clone instance before the pseudo action */
             custom_action_order(child, pcmk__op_key(child->id, action_first, 0),
                                 NULL, NULL, NULL, unordered_action,
                                 pe_order_one_or_more|pe_order_implies_then_printed,
                                 data_set);
         }
 
         /* order the "then" dependency to occur after the pseudo action only if
          * the pseudo action is runnable */ 
         order_id = custom_action_order(NULL, NULL, unordered_action, rsc_then,
                                        pcmk__op_key(rsc_then->id, action_then, 0),
                                        NULL, cons_weight|pe_order_runnable_left,
                                        data_set);
     } else {
         order_id = new_rsc_order(rsc_first, action_first, rsc_then, action_then, cons_weight, data_set);
     }
 
     pe_rsc_trace(rsc_first, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x",
                  order_id, id, rsc_first->id, action_first, rsc_then->id, action_then, cons_weight);
 
     if (invert_bool == FALSE) {
         return TRUE;
     }
 
     action_then = invert_action(action_then);
     action_first = invert_action(action_first);
     if (action_then == NULL || action_first == NULL) {
         pcmk__config_err("Cannot invert constraint '%s' "
                          "(please specify inverse manually)", id);
         return TRUE;
     }
 
     cons_weight = pe_order_optional;
     if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) {
         crm_trace("Upgrade : recovery - implies left");
         pe__set_order_flags(cons_weight, pe_order_implies_first);
     }
 
     pe__set_order_flags(cons_weight,
                           get_flags(id, kind, action_first, action_then, TRUE));
 
     order_id = new_rsc_order(rsc_then, action_then, rsc_first, action_first, cons_weight, data_set);
 
     pe_rsc_trace(rsc_then, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x",
                  order_id, id, rsc_then->id, action_then, rsc_first->id, action_first, cons_weight);
 
     return TRUE;
 }
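
/* A minimal sketch of the configurations unpacked above (all IDs
 * hypothetical). A simple ordering:
 *
 *   <rsc_order id="order-db-ip" first="db" first-action="start"
 *              then="ip" then-action="start" kind="Mandatory"/>
 *
 * and an ordering relaxed by clone-min, where "ip" may start as soon as two
 * instances of "db-clone" are active:
 *
 *   <clone id="db-clone">
 *     <meta_attributes id="db-clone-meta">
 *       <nvpair id="db-clone-min" name="clone-min" value="2"/>
 *     </meta_attributes>
 *     ...
 *   </clone>
 *   <rsc_order id="order-db-ip-min" first="db-clone" then="ip"/>
 */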
 
 static gboolean
 expand_tags_in_sets(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     xmlNode *new_xml = NULL;
     xmlNode *set = NULL;
     gboolean any_refs = FALSE;
     const char *cons_id = NULL;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     new_xml = copy_xml(xml_obj);
     cons_id = ID(new_xml);
 
     for (set = pcmk__xe_first_child(new_xml); set != NULL;
          set = pcmk__xe_next(set)) {
 
         xmlNode *xml_rsc = NULL;
         GListPtr tag_refs = NULL;
         GListPtr gIter = NULL;
 
         if (!pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_casei)) {
             continue;
         }
 
         for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             pe_resource_t *rsc = NULL;
             pe_tag_t *tag = NULL;
             const char *id = ID(xml_rsc);
 
             if (!pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_casei)) {
                 continue;
             }
 
             if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) {
                 pcmk__config_err("Ignoring resource sets for constraint '%s' "
                                  "because '%s' is not a valid resource or tag",
                                  cons_id, id);
                 free_xml(new_xml);
                 return FALSE;
 
             } else if (rsc) {
                 continue;
 
             } else if (tag) {
                 /* The resource_ref under the resource_set references a template/tag */
                 xmlNode *last_ref = xml_rsc;
 
                 /* A sample:
 
                    Original XML:
 
                    <resource_set id="tag1-colocation-0" sequential="true">
                      <resource_ref id="rsc1"/>
                      <resource_ref id="tag1"/>
                      <resource_ref id="rsc4"/>
                    </resource_set>
 
                   Now we append rsc2 and rsc3, which are tagged with tag1, right after it:
 
                    <resource_set id="tag1-colocation-0" sequential="true">
                      <resource_ref id="rsc1"/>
                      <resource_ref id="tag1"/>
                      <resource_ref id="rsc2"/>
                      <resource_ref id="rsc3"/>
                      <resource_ref id="rsc4"/>
                    </resource_set>
 
                  */
 
                 for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
                     const char *obj_ref = (const char *) gIter->data;
                     xmlNode *new_rsc_ref = NULL;
 
                     new_rsc_ref = xmlNewDocRawNode(getDocPtr(set), NULL,
                                                    (pcmkXmlStr) XML_TAG_RESOURCE_REF, NULL);
                     crm_xml_add(new_rsc_ref, XML_ATTR_ID, obj_ref);
                     xmlAddNextSibling(last_ref, new_rsc_ref);
 
                     last_ref = new_rsc_ref;
                 }
 
                 any_refs = TRUE;
 
                /* Do not directly free '<resource_ref id="tag1"/>', which
                 * would break the subsequent pcmk__xe_next(xml_rsc) call and
                 * cause an "Invalid read" reported by valgrind. Just remember
                 * it for freeing later.
                 */
                 tag_refs = g_list_append(tag_refs, xml_rsc);
             }
         }
 
         /* Now free '<resource_ref id="tag1"/>', and finally get:
 
            <resource_set id="tag1-colocation-0" sequential="true">
              <resource_ref id="rsc1"/>
              <resource_ref id="rsc2"/>
              <resource_ref id="rsc3"/>
              <resource_ref id="rsc4"/>
            </resource_set>
 
          */
         for (gIter = tag_refs; gIter != NULL; gIter = gIter->next) {
             xmlNode *tag_ref = gIter->data;
 
             free_xml(tag_ref);
         }
         g_list_free(tag_refs);
     }
 
     if (any_refs) {
         *expanded_xml = new_xml;
     } else {
         free_xml(new_xml);
     }
 
     return TRUE;
 }
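
/* For reference, a tag such as the "tag1" expanded in the sample above is
 * defined in the CIB configuration like this (IDs hypothetical):
 *
 *   <tags>
 *     <tag id="tag1">
 *       <obj_ref id="rsc2"/>
 *       <obj_ref id="rsc3"/>
 *     </tag>
 *   </tags>
 */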
 
 static gboolean
 tag_to_set(xmlNode * xml_obj, xmlNode ** rsc_set, const char * attr,
                 gboolean convert_rsc, pe_working_set_t * data_set)
 {
     const char *cons_id = NULL;
     const char *id = NULL;
 
     pe_resource_t *rsc = NULL;
     pe_tag_t *tag = NULL;
 
     *rsc_set = NULL;
 
     CRM_CHECK((xml_obj != NULL) && (attr != NULL), return FALSE);
 
     cons_id = ID(xml_obj);
     if (cons_id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     id = crm_element_value(xml_obj, attr);
     if (id == NULL) {
         return TRUE;
     }
 
     if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", cons_id, id);
         return FALSE;
 
     } else if (tag) {
         GListPtr gIter = NULL;
 
        /* A template/tag is referenced by the "attr" attribute (first, then,
           rsc, or with-rsc). Add a corresponding "resource_set" under the
           constraint, containing the resources derived from the template or
           tagged with it. */
         *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
         crm_xml_add(*rsc_set, XML_ATTR_ID, id);
 
         for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
             const char *obj_ref = (const char *) gIter->data;
             xmlNode *rsc_ref = NULL;
 
             rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF);
             crm_xml_add(rsc_ref, XML_ATTR_ID, obj_ref);
         }
 
         /* Set sequential="false" for the resource_set */
         crm_xml_add(*rsc_set, "sequential", XML_BOOLEAN_FALSE);
 
     } else if (rsc && convert_rsc) {
        /* Even if a regular resource is referenced by "attr", convert it into
           a resource_set, because the other side of the constraint could be
           a template/tag reference. */
         xmlNode *rsc_ref = NULL;
 
         *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
         crm_xml_add(*rsc_set, XML_ATTR_ID, id);
 
         rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF);
         crm_xml_add(rsc_ref, XML_ATTR_ID, id);
 
     } else {
         return TRUE;
     }
 
     /* Remove the "attr" attribute referencing the template/tag */
     if (*rsc_set) {
         xml_remove_prop(xml_obj, attr);
     }
 
     return TRUE;
 }
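
/* A sketch of tag_to_set() in action for an ordering (IDs hypothetical).
 * Given a tag "tag1" referencing rsc2 and rsc3,
 *
 *   <rsc_order id="order-1" first="rsc1" then="tag1"/>
 *
 * has its "then" attribute replaced by an unordered resource_set; with
 * convert_rsc set, the plain resource on the other side is converted into a
 * single-member set as well:
 *
 *   <rsc_order id="order-1">
 *     <resource_set id="rsc1">
 *       <resource_ref id="rsc1"/>
 *     </resource_set>
 *     <resource_set id="tag1" sequential="false">
 *       <resource_ref id="rsc2"/>
 *       <resource_ref id="rsc3"/>
 *     </resource_set>
 *   </rsc_order>
 */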
 
 static void unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh,
                                 const char *role, const char *score,
                                 pe_working_set_t *data_set,
-                                pe_match_data_t *match_data);
+                                pe_re_match_data_t *match_data);
 
 static void
 unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
 
     if(value) {
         pe_resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, value);
 
         unpack_rsc_location(xml_obj, rsc_lh, NULL, NULL, data_set, NULL);
     }
 
     value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN);
     if(value) {
         regex_t *r_patt = calloc(1, sizeof(regex_t));
         bool invert = FALSE;
         GListPtr rIter = NULL;
 
         if(value[0] == '!') {
             value++;
             invert = TRUE;
         }
 
         if (regcomp(r_patt, value, REG_EXTENDED)) {
             pcmk__config_err("Ignoring constraint '%s' because "
                              XML_LOC_ATTR_SOURCE_PATTERN
                              " has invalid value '%s'", id, value);
             regfree(r_patt);
             free(r_patt);
             return;
         }
 
         for (rIter = data_set->resources; rIter; rIter = rIter->next) {
             pe_resource_t *r = rIter->data;
             int nregs = 0;
             regmatch_t *pmatch = NULL;
             int status;
 
             if(r_patt->re_nsub > 0) {
                 nregs = r_patt->re_nsub + 1;
             } else {
                 nregs = 1;
             }
             pmatch = calloc(nregs, sizeof(regmatch_t));
 
             status = regexec(r_patt, r->id, nregs, pmatch, 0);
 
             if(invert == FALSE && status == 0) {
                 pe_re_match_data_t re_match_data = {
                                                 .string = r->id,
                                                 .nregs = nregs,
                                                 .pmatch = pmatch
                                                };
-                pe_match_data_t match_data = {
-                                                .re = &re_match_data,
-                                                .params = r->parameters,
-                                                .meta = r->meta,
-                                             };
+
                 crm_debug("'%s' matched '%s' for %s", r->id, value, id);
-                unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &match_data);
+                unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &re_match_data);
 
             } else if (invert && (status != 0)) {
                 crm_debug("'%s' is an inverted match of '%s' for %s", r->id, value, id);
                 unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, NULL);
 
             } else {
                 crm_trace("'%s' does not match '%s' for %s", r->id, value, id);
             }
 
             free(pmatch);
         }
 
         regfree(r_patt);
         free(r_patt);
     }
 }
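
/* Minimal examples of the location forms handled above (IDs and names
 * hypothetical):
 *
 *   <rsc_location id="loc-1" rsc="rsc1" node="node1" score="INFINITY"/>
 *
 *   <rsc_location id="loc-ips" rsc-pattern="^ip-(.+)$" node="node1"
 *                 score="100"/>
 *
 *   <rsc_location id="loc-not-dummy" rsc-pattern="!^dummy" node="node2"
 *                 score="-INFINITY"/>
 *
 * The second form applies to every resource whose ID matches the regular
 * expression (submatches are passed along for rule expansion); the third,
 * because of the leading '!', applies to every resource whose ID does not
 * match.
 */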
 
 static void
 unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc_lh, const char *role,
                     const char *score, pe_working_set_t *data_set,
-                    pe_match_data_t *match_data)
+                    pe_re_match_data_t *re_match_data)
 {
     pe__location_t *location = NULL;
     const char *id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE);
     const char *discovery = crm_element_value(xml_obj, XML_LOCATION_ATTR_DISCOVERY);
 
     if (rsc_lh == NULL) {
         pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                           "does not exist", id, id_lh);
         return;
     }
 
     if (score == NULL) {
         score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
     }
 
     if (node != NULL && score != NULL) {
         int score_i = char2score(score);
         pe_node_t *match = pe_find_node(data_set->nodes, node);
 
         if (!match) {
             return;
         }
         location = rsc2node_new(id, rsc_lh, score_i, discovery, match, data_set);
 
     } else {
         bool empty = TRUE;
         crm_time_t *next_change = crm_time_new_undefined();
 
         /* This loop is logically parallel to pe_evaluate_rules(), except
          * instead of checking whether any rule is active, we set up location
          * constraints for each active rule.
          */
         for (xmlNode *rule_xml = first_named_child(xml_obj, XML_TAG_RULE);
              rule_xml != NULL; rule_xml = crm_next_same_xml(rule_xml)) {
             empty = FALSE;
             crm_trace("Unpacking %s/%s", id, ID(rule_xml));
             generate_location_rule(rsc_lh, rule_xml, discovery, next_change,
-                                   data_set, match_data);
+                                   data_set, re_match_data);
         }
 
         if (empty) {
             pcmk__config_err("Ignoring constraint '%s' because it contains "
                              "no rules", id);
         }
 
         /* If there is a point in the future when the evaluation of a rule will
          * change, make sure the scheduler is re-run by that time.
          */
         if (crm_time_is_defined(next_change)) {
             time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
             pe__update_recheck_time(t, data_set);
         }
         crm_time_free(next_change);
         return;
     }
 
     if (role == NULL) {
         role = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE);
     }
 
     if (location && role) {
         if (text2role(role) == RSC_ROLE_UNKNOWN) {
             pe_err("Invalid constraint %s: Bad role %s", id, role);
             return;
 
         } else {
             enum rsc_role_e r = text2role(role);
             switch(r) {
                 case RSC_ROLE_UNKNOWN:
                 case RSC_ROLE_STARTED:
                 case RSC_ROLE_SLAVE:
                     /* Applies to all */
                     location->role_filter = RSC_ROLE_UNKNOWN;
                     break;
                 default:
                     location->role_filter = r;
                     break;
             }
         }
     }
 }
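
/* Besides the node/score short form, a location constraint may carry one or
 * more rules, each unpacked by generate_location_rule(). A sketch (IDs and
 * attribute names hypothetical):
 *
 *   <rsc_location id="loc-rule" rsc="rsc1">
 *     <rule id="loc-rule-1" score="-INFINITY">
 *       <expression id="loc-rule-1-expr" attribute="datacenter"
 *                   operation="ne" value="dc1"/>
 *     </rule>
 *   </rsc_location>
 */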
 
 static gboolean
 unpack_location_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *id_lh = NULL;
     const char *state_lh = NULL;
 
     pe_resource_t *rsc_lh = NULL;
 
     pe_tag_t *tag_lh = NULL;
 
     xmlNode *new_xml = NULL;
     xmlNode *rsc_set_lh = NULL;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     /* Attempt to expand any template/tag references in possible resource sets. */
     expand_tags_in_sets(xml_obj, &new_xml, data_set);
     if (new_xml) {
        /* There are resource sets referencing templates/tags. Return with the expanded XML. */
         crm_log_xml_trace(new_xml, "Expanded rsc_location...");
         *expanded_xml = new_xml;
         return TRUE;
     }
 
     id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
     if (id_lh == NULL) {
         return TRUE;
     }
 
     if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_lh);
         return FALSE;
 
     } else if (rsc_lh) {
         /* No template is referenced. */
         return TRUE;
     }
 
     state_lh = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE);
 
     new_xml = copy_xml(xml_obj);
 
     /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_location constraint. */
     if (tag_to_set(new_xml, &rsc_set_lh, XML_LOC_ATTR_SOURCE, FALSE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_lh) {
         if (state_lh) {
             /* A "rsc-role" is specified.
                Move it into the converted resource_set as a "role"" attribute. */
             crm_xml_add(rsc_set_lh, "role", state_lh);
             xml_remove_prop(new_xml, XML_RULE_ATTR_ROLE);
         }
         crm_log_xml_trace(new_xml, "Expanded rsc_location...");
         *expanded_xml = new_xml;
 
     } else {
         /* No sets */
         free_xml(new_xml);
     }
 
     return TRUE;
 }
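
/* A sketch of the conversion above (IDs hypothetical). Given tag "tag1"
 * referencing rsc2 and rsc3,
 *
 *   <rsc_location id="loc-tag" rsc="tag1" role="Master" node="node1"
 *                 score="INFINITY"/>
 *
 * becomes a set-based constraint with the role moved onto the set:
 *
 *   <rsc_location id="loc-tag" node="node1" score="INFINITY">
 *     <resource_set id="tag1" sequential="false" role="Master">
 *       <resource_ref id="rsc2"/>
 *       <resource_ref id="rsc3"/>
 *     </resource_set>
 *   </rsc_location>
 */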
 
 static gboolean
 unpack_location_set(xmlNode * location, xmlNode * set, pe_working_set_t * data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *resource = NULL;
     const char *set_id;
     const char *role;
     const char *local_score;
 
     CRM_CHECK(set != NULL, return FALSE);
 
     set_id = ID(set);
     if (set_id == NULL) {
         pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET " without "
                          XML_ATTR_ID " in constraint '%s'",
                          crm_str(ID(location)));
         return FALSE;
     }
 
     role = crm_element_value(set, "role");
     local_score = crm_element_value(set, XML_RULE_ATTR_SCORE);
 
     for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
          xml_rsc = pcmk__xe_next(xml_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
             EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
             unpack_rsc_location(location, resource, role, local_score, data_set, NULL);
         }
     }
 
     return TRUE;
 }
 
 static void
 unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     xmlNode *set = NULL;
     gboolean any_sets = FALSE;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     if (unpack_location_tags(xml_obj, &expanded_xml, data_set) == FALSE) {
         return;
     }
 
     if (expanded_xml) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
     }
 
     for (set = pcmk__xe_first_child(xml_obj); set != NULL;
          set = pcmk__xe_next(set)) {
 
         if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) {
             any_sets = TRUE;
             set = expand_idref(set, data_set->input);
             if (unpack_location_set(xml_obj, set, data_set) == FALSE) {
                 if (expanded_xml) {
                     free_xml(expanded_xml);
                 }
                 return;
             }
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (any_sets == FALSE) {
         unpack_simple_location(xml_obj, data_set);
     }
 }
 
 static int
 get_node_score(const char *rule, const char *score, gboolean raw, pe_node_t * node, pe_resource_t *rsc)
 {
     int score_f = 0;
 
     if (score == NULL) {
         pe_err("Rule %s: no score specified.  Assuming 0.", rule);
 
     } else if (raw) {
         score_f = char2score(score);
 
     } else {
         const char *attr_score = pe_node_attribute_calculated(node, score, rsc);
 
         if (attr_score == NULL) {
             crm_debug("Rule %s: node %s did not have a value for %s",
                       rule, node->details->uname, score);
             score_f = -INFINITY;
 
         } else {
             crm_debug("Rule %s: node %s had value %s for %s",
                       rule, node->details->uname, attr_score, score);
             score_f = char2score(attr_score);
         }
     }
     return score_f;
 }
 
 static pe__location_t *
 generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
                        const char *discovery, crm_time_t *next_change,
-                       pe_working_set_t *data_set, pe_match_data_t *match_data)
+                       pe_working_set_t *data_set,
+                       pe_re_match_data_t *re_match_data)
 {
     const char *rule_id = NULL;
     const char *score = NULL;
     const char *boolean = NULL;
     const char *role = NULL;
 
     GListPtr gIter = NULL;
     GListPtr match_L = NULL;
 
     gboolean do_and = TRUE;
     gboolean accept = TRUE;
     gboolean raw_score = TRUE;
     gboolean score_allocated = FALSE;
 
     pe__location_t *location_rule = NULL;
 
     rule_xml = expand_idref(rule_xml, data_set->input);
     rule_id = crm_element_value(rule_xml, XML_ATTR_ID);
     boolean = crm_element_value(rule_xml, XML_RULE_ATTR_BOOLEAN_OP);
     role = crm_element_value(rule_xml, XML_RULE_ATTR_ROLE);
 
     crm_trace("Processing rule: %s", rule_id);
 
     if (role != NULL && text2role(role) == RSC_ROLE_UNKNOWN) {
         pe_err("Bad role specified for %s: %s", rule_id, role);
         return NULL;
     }
 
     score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE);
     if (score == NULL) {
         score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE_ATTRIBUTE);
         if (score != NULL) {
             raw_score = FALSE;
         }
     }
     if (pcmk__str_eq(boolean, "or", pcmk__str_casei)) {
         do_and = FALSE;
     }
 
     location_rule = rsc2node_new(rule_id, rsc, 0, discovery, NULL, data_set);
 
     if (location_rule == NULL) {
         return NULL;
     }
 
-    if (match_data && match_data->re && match_data->re->nregs > 0 && match_data->re->pmatch[0].rm_so != -1) {
-        if (raw_score == FALSE) {
-            char *result = pe_expand_re_matches(score, match_data->re);
+    if ((re_match_data != NULL) && (re_match_data->nregs > 0)
+        && (re_match_data->pmatch[0].rm_so != -1) && !raw_score) {
 
-            if (result) {
-                score = (const char *) result;
-                score_allocated = TRUE;
-            }
+        char *result = pe_expand_re_matches(score, re_match_data);
+
+        if (result != NULL) {
+            score = result;
+            score_allocated = TRUE;
         }
     }
 
     if (role != NULL) {
         crm_trace("Setting role filter: %s", role);
         location_rule->role_filter = text2role(role);
         if (location_rule->role_filter == RSC_ROLE_SLAVE) {
            /* A promotable clone can't be promoted without being a slave
             * first, so any constraint for the slave role applies to every
             * role.
             */
             location_rule->role_filter = RSC_ROLE_UNKNOWN;
         }
     }
     if (do_and) {
         GListPtr gIter = NULL;
 
         match_L = pcmk__copy_node_list(data_set->nodes, true);
         for (gIter = match_L; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             node->weight = get_node_score(rule_id, score, raw_score, node, rsc);
         }
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         int score_f = 0;
         pe_node_t *node = (pe_node_t *) gIter->data;
+        pe_match_data_t match_data = {
+            .re = re_match_data,
+            .params = pe_rsc_params(rsc, node, data_set),
+            .meta = rsc->meta,
+        };
 
         accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN,
-                              data_set->now, next_change, match_data);
+                              data_set->now, next_change, &match_data);
 
         crm_trace("Rule %s %s on %s", ID(rule_xml), accept ? "passed" : "failed",
                   node->details->uname);
 
         score_f = get_node_score(rule_id, score, raw_score, node, rsc);
        /* if (accept && score_f == -INFINITY) { */
        /*     accept = FALSE; */
        /* } */
 
         if (accept) {
             pe_node_t *local = pe_find_node_id(match_L, node->details->id);
 
             if (local == NULL && do_and) {
                 continue;
 
             } else if (local == NULL) {
                 local = pe__copy_node(node);
                 match_L = g_list_append(match_L, local);
             }
 
             if (do_and == FALSE) {
                 local->weight = pe__add_scores(local->weight, score_f);
             }
             crm_trace("node %s now has weight %d", node->details->uname, local->weight);
 
         } else if (do_and && !accept) {
             /* remove it */
             pe_node_t *delete = pe_find_node_id(match_L, node->details->id);
 
             if (delete != NULL) {
                 match_L = g_list_remove(match_L, delete);
                 crm_trace("node %s did not match", node->details->uname);
             }
             free(delete);
         }
     }
 
     if (score_allocated == TRUE) {
         free((char *)score);
     }
 
     location_rule->node_list_rh = match_L;
     if (location_rule->node_list_rh == NULL) {
         crm_trace("No matching nodes for rule %s", rule_id);
         return NULL;
     }
 
     crm_trace("%s: %d nodes matched", rule_id, g_list_length(location_rule->node_list_rh));
     return location_rule;
 }
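
/* Sketch of a rule exercising the score-attribute and regex-submatch paths
 * above (names hypothetical). With rsc-pattern="^ip-(.+)$", "%1" is expanded
 * from the first submatch, so each matching resource takes its score from a
 * different node attribute:
 *
 *   <rsc_location id="loc-ping" rsc-pattern="^ip-(.+)$">
 *     <rule id="loc-ping-rule" score-attribute="ping-%1">
 *       <expression id="loc-ping-expr" attribute="ping-%1"
 *                   operation="defined"/>
 *     </rule>
 *   </rsc_location>
 */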
 
 static gint
 sort_cons_priority_lh(gconstpointer a, gconstpointer b)
 {
     const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
     const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     CRM_ASSERT(rsc_constraint1->rsc_lh != NULL);
     CRM_ASSERT(rsc_constraint1->rsc_rh != NULL);
 
     if (rsc_constraint1->rsc_lh->priority > rsc_constraint2->rsc_lh->priority) {
         return -1;
     }
 
     if (rsc_constraint1->rsc_lh->priority < rsc_constraint2->rsc_lh->priority) {
         return 1;
     }
 
     /* Process clones before primitives and groups */
     if (rsc_constraint1->rsc_lh->variant > rsc_constraint2->rsc_lh->variant) {
         return -1;
     } else if (rsc_constraint1->rsc_lh->variant < rsc_constraint2->rsc_lh->variant) {
         return 1;
     }
 
     /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
      * clones (probably unnecessary, but avoids having to update regression
      * tests)
      */
     if (rsc_constraint1->rsc_lh->variant == pe_clone) {
         if (pcmk_is_set(rsc_constraint1->rsc_lh->flags, pe_rsc_promotable)
             && !pcmk_is_set(rsc_constraint2->rsc_lh->flags, pe_rsc_promotable)) {
             return -1;
         } else if (!pcmk_is_set(rsc_constraint1->rsc_lh->flags, pe_rsc_promotable)
             && pcmk_is_set(rsc_constraint2->rsc_lh->flags, pe_rsc_promotable)) {
             return 1;
         }
     }
 
     return strcmp(rsc_constraint1->rsc_lh->id, rsc_constraint2->rsc_lh->id);
 }
 
 static gint
 sort_cons_priority_rh(gconstpointer a, gconstpointer b)
 {
     const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
     const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     CRM_ASSERT(rsc_constraint1->rsc_lh != NULL);
     CRM_ASSERT(rsc_constraint1->rsc_rh != NULL);
 
     if (rsc_constraint1->rsc_rh->priority > rsc_constraint2->rsc_rh->priority) {
         return -1;
     }
 
     if (rsc_constraint1->rsc_rh->priority < rsc_constraint2->rsc_rh->priority) {
         return 1;
     }
 
     /* Process clones before primitives and groups */
     if (rsc_constraint1->rsc_rh->variant > rsc_constraint2->rsc_rh->variant) {
         return -1;
     } else if (rsc_constraint1->rsc_rh->variant < rsc_constraint2->rsc_rh->variant) {
         return 1;
     }
 
     /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
      * clones (probably unnecessary, but avoids having to update regression
      * tests)
      */
     if (rsc_constraint1->rsc_rh->variant == pe_clone) {
         if (pcmk_is_set(rsc_constraint1->rsc_rh->flags, pe_rsc_promotable)
             && !pcmk_is_set(rsc_constraint2->rsc_rh->flags, pe_rsc_promotable)) {
             return -1;
         } else if (!pcmk_is_set(rsc_constraint1->rsc_rh->flags, pe_rsc_promotable)
             && pcmk_is_set(rsc_constraint2->rsc_rh->flags, pe_rsc_promotable)) {
             return 1;
         }
     }
 
     return strcmp(rsc_constraint1->rsc_rh->id, rsc_constraint2->rsc_rh->id);
 }
 
 static void
 anti_colocation_order(pe_resource_t * first_rsc, int first_role,
                       pe_resource_t * then_rsc, int then_role,
                       pe_working_set_t * data_set)
 {
     const char *first_tasks[] = { NULL, NULL };
     const char *then_tasks[] = { NULL, NULL };
     int first_lpc = 0;
     int then_lpc = 0;
 
     /* Actions to make first_rsc lose first_role */
     if (first_role == RSC_ROLE_MASTER) {
         first_tasks[0] = CRMD_ACTION_DEMOTE;
 
     } else {
         first_tasks[0] = CRMD_ACTION_STOP;
 
         if (first_role == RSC_ROLE_SLAVE) {
             first_tasks[1] = CRMD_ACTION_PROMOTE;
         }
     }
 
     /* Actions to make then_rsc gain then_role */
     if (then_role == RSC_ROLE_MASTER) {
         then_tasks[0] = CRMD_ACTION_PROMOTE;
 
     } else {
         then_tasks[0] = CRMD_ACTION_START;
 
         if (then_role == RSC_ROLE_SLAVE) {
             then_tasks[1] = CRMD_ACTION_DEMOTE;
         }
     }
 
     for (first_lpc = 0; first_lpc <= 1 && first_tasks[first_lpc] != NULL; first_lpc++) {
         for (then_lpc = 0; then_lpc <= 1 && then_tasks[then_lpc] != NULL; then_lpc++) {
             new_rsc_order(first_rsc, first_tasks[first_lpc], then_rsc, then_tasks[then_lpc],
                           pe_order_anti_colocation, data_set);
         }
     }
 }
 
 void
 pcmk__new_colocation(const char *id, const char *node_attr, int score,
                      pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
                      const char *state_lh, const char *state_rh,
                      pe_working_set_t *data_set)
 {
     pcmk__colocation_t *new_con = NULL;
 
     if ((rsc_lh == NULL) || (rsc_rh == NULL)) {
         pcmk__config_err("Ignoring colocation '%s' because resource "
                          "does not exist", id);
         return;
     }
 
     new_con = calloc(1, sizeof(pcmk__colocation_t));
     if (new_con == NULL) {
         return;
     }
 
     if (pcmk__str_eq(state_lh, RSC_ROLE_STARTED_S, pcmk__str_null_matches | pcmk__str_casei)) {
         state_lh = RSC_ROLE_UNKNOWN_S;
     }
 
     if (pcmk__str_eq(state_rh, RSC_ROLE_STARTED_S, pcmk__str_null_matches | pcmk__str_casei)) {
         state_rh = RSC_ROLE_UNKNOWN_S;
     }
 
     new_con->id = id;
     new_con->rsc_lh = rsc_lh;
     new_con->rsc_rh = rsc_rh;
     new_con->score = score;
     new_con->role_lh = text2role(state_lh);
     new_con->role_rh = text2role(state_rh);
     new_con->node_attribute = node_attr;
 
     if (node_attr == NULL) {
         node_attr = CRM_ATTR_UNAME;
     }
 
     pe_rsc_trace(rsc_lh, "%s ==> %s (%s %d)", rsc_lh->id, rsc_rh->id, node_attr, score);
 
     rsc_lh->rsc_cons = g_list_insert_sorted(rsc_lh->rsc_cons, new_con, sort_cons_priority_rh);
 
     rsc_rh->rsc_cons_lhs =
         g_list_insert_sorted(rsc_rh->rsc_cons_lhs, new_con, sort_cons_priority_lh);
 
     data_set->colocation_constraints = g_list_append(data_set->colocation_constraints, new_con);
 
     if (score <= -INFINITY) {
         anti_colocation_order(rsc_lh, new_con->role_lh, rsc_rh, new_con->role_rh, data_set);
         anti_colocation_order(rsc_rh, new_con->role_rh, rsc_lh, new_con->role_lh, data_set);
     }
 }
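
/* The configuration handled by callers of pcmk__new_colocation() looks like
 * this (IDs hypothetical); the -INFINITY score makes it an anti-colocation,
 * which additionally generates the anti_colocation_order() orderings above:
 *
 *   <rsc_colocation id="colo-1" rsc="rsc1" with-rsc="rsc2"
 *                   score="-INFINITY"/>
 */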
 
 /* LHS before RHS */
 int
 new_rsc_order(pe_resource_t * lh_rsc, const char *lh_task,
               pe_resource_t * rh_rsc, const char *rh_task,
               enum pe_ordering type, pe_working_set_t * data_set)
 {
     char *lh_key = NULL;
     char *rh_key = NULL;
 
     CRM_CHECK(lh_rsc != NULL, return -1);
     CRM_CHECK(lh_task != NULL, return -1);
     CRM_CHECK(rh_rsc != NULL, return -1);
     CRM_CHECK(rh_task != NULL, return -1);
 
 #if 0
     /* We do not need to test if these reference stonith resources
      * because the fencer has access to them even when they're not "running"
      */
     if (validate_order_resources(lh_rsc, lh_task, rh_rsc, rh_task)) {
         return -1;
     }
 #endif
 
     lh_key = pcmk__op_key(lh_rsc->id, lh_task, 0);
     rh_key = pcmk__op_key(rh_rsc->id, rh_task, 0);
 
     return custom_action_order(lh_rsc, lh_key, NULL, rh_rsc, rh_key, NULL, type, data_set);
 }
 
 static char *
 task_from_action_or_key(pe_action_t *action, const char *key)
 {
     char *res = NULL;
 
     if (action) {
         res = strdup(action->task);
     } else if (key) {
         parse_op_key(key, NULL, &res, NULL);
     }
     return res;
 }
 
/* When order constraints are made between two resources' start and stop
 * actions, those constraints have to be mirrored against the corresponding
 * migration actions to ensure start/stop ordering is preserved during
 * a migration */
 static void
 handle_migration_ordering(pe__ordering_t *order, pe_working_set_t *data_set)
 {
     char *lh_task = NULL;
     char *rh_task = NULL;
     gboolean rh_migratable;
     gboolean lh_migratable;
 
     if (order->lh_rsc == NULL || order->rh_rsc == NULL) {
         return;
     } else if (order->lh_rsc == order->rh_rsc) {
         return;
    /* don't mess with constraints built between parent
     * resources and their children */
     } else if (is_parent(order->lh_rsc, order->rh_rsc)) {
         return;
     } else if (is_parent(order->rh_rsc, order->lh_rsc)) {
         return;
     }
 
     lh_migratable = pcmk_is_set(order->lh_rsc->flags, pe_rsc_allow_migrate);
     rh_migratable = pcmk_is_set(order->rh_rsc->flags, pe_rsc_allow_migrate);
 
     /* one of them has to be migratable for
      * the migrate ordering logic to be applied */
     if (lh_migratable == FALSE && rh_migratable == FALSE) {
         return;
     }
 
    /* At this point we have two resources that allow migration and that have
     * an order dependency set between them. If those order dependencies
     * involve start/stop actions, we need to mirror the corresponding migrate
     * actions so order will be preserved. */
     lh_task = task_from_action_or_key(order->lh_action, order->lh_action_task);
     rh_task = task_from_action_or_key(order->rh_action, order->rh_action_task);
     if (lh_task == NULL || rh_task == NULL) {
         goto cleanup_order;
     }
 
     if (pcmk__str_eq(lh_task, RSC_START, pcmk__str_casei) && pcmk__str_eq(rh_task, RSC_START, pcmk__str_casei)) {
         int flags = pe_order_optional;
 
         if (lh_migratable && rh_migratable) {
             /* A start then B start
              * A migrate_from then B migrate_to */
             custom_action_order(order->lh_rsc,
                                 pcmk__op_key(order->lh_rsc->id, RSC_MIGRATED, 0),
                                 NULL, order->rh_rsc,
                                 pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
                                 NULL, flags, data_set);
         }
 
         if (rh_migratable) {
             if (lh_migratable) {
                 pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
             }
 
            /* A start then B start
             * A start then B migrate_to ... only if A start is not part of a
             * migration */
             custom_action_order(order->lh_rsc,
                                 pcmk__op_key(order->lh_rsc->id, RSC_START, 0),
                                 NULL, order->rh_rsc,
                                 pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
                                 NULL, flags, data_set);
         }
 
     } else if (rh_migratable == TRUE && pcmk__str_eq(lh_task, RSC_STOP, pcmk__str_casei) && pcmk__str_eq(rh_task, RSC_STOP, pcmk__str_casei)) {
         int flags = pe_order_optional;
 
         if (lh_migratable) {
             pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
         }
 
        /* The rh side is at the bottom of the stack during a stop. Given a
         * constraint "stop B then stop A", where B is migrating via stop/start
         * and A is migrating using migration actions, we need to enforce that
         * A's migrate_to action occurs after B's stop action. */
         custom_action_order(order->lh_rsc,
                             pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0), NULL,
                             order->rh_rsc,
                             pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
                             NULL, flags, data_set);
 
         /* We need to build the stop constraint against migrate_from as well
          * to account for partial migrations. */
         if (order->rh_rsc->partial_migration_target) {
             custom_action_order(order->lh_rsc,
                                 pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0),
                                 NULL, order->rh_rsc,
                                 pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
                                 NULL, flags, data_set);
         }
 
     } else if (pcmk__str_eq(lh_task, RSC_PROMOTE, pcmk__str_casei) && pcmk__str_eq(rh_task, RSC_START, pcmk__str_casei)) {
         int flags = pe_order_optional;
 
         if (rh_migratable) {
             /* A promote then B start
              * A promote then B migrate_to */
             custom_action_order(order->lh_rsc,
                                 pcmk__op_key(order->lh_rsc->id, RSC_PROMOTE, 0),
                                 NULL, order->rh_rsc,
                                 pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
                                 NULL, flags, data_set);
         }
 
     } else if (pcmk__str_eq(lh_task, RSC_DEMOTE, pcmk__str_casei) && pcmk__str_eq(rh_task, RSC_STOP, pcmk__str_casei)) {
         int flags = pe_order_optional;
 
         if (rh_migratable) {
             /* A demote then B stop
              * A demote then B migrate_to */
             custom_action_order(order->lh_rsc, pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0), NULL,
                                 order->rh_rsc, pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL,
                                 flags, data_set);
 
             /* We need to build the demote constraint against migrate_from as well
              * to account for partial migrations. */
             if (order->rh_rsc->partial_migration_target) {
                 custom_action_order(order->lh_rsc,
                                     pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
                                     NULL, order->rh_rsc,
                                     pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
                                     NULL, flags, data_set);
             }
         }
     }
 
 cleanup_order:
     free(lh_task);
     free(rh_task);
 }
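
/* Migration ordering only applies when at least one of the ordered resources
 * allows migration, e.g. via its meta-attributes (IDs hypothetical):
 *
 *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *     <meta_attributes id="vm1-meta">
 *       <nvpair id="vm1-allow-migrate" name="allow-migrate" value="true"/>
 *     </meta_attributes>
 *   </primitive>
 *
 * Given "vm1 start then vm2 start" with both resources migratable, the
 * mirrored ordering "vm1 migrate_from then vm2 migrate_to" is added above.
 */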
 
 /* LHS before RHS */
 int
 custom_action_order(pe_resource_t * lh_rsc, char *lh_action_task, pe_action_t * lh_action,
                     pe_resource_t * rh_rsc, char *rh_action_task, pe_action_t * rh_action,
                     enum pe_ordering type, pe_working_set_t * data_set)
 {
     pe__ordering_t *order = NULL;
 
     if (lh_rsc == NULL && lh_action) {
         lh_rsc = lh_action->rsc;
     }
     if (rh_rsc == NULL && rh_action) {
         rh_rsc = rh_action->rsc;
     }
 
     if ((lh_action == NULL && lh_rsc == NULL)
         || (rh_action == NULL && rh_rsc == NULL)) {
         crm_err("Invalid ordering (bug?)");
         free(lh_action_task);
         free(rh_action_task);
         return -1;
     }
 
     order = calloc(1, sizeof(pe__ordering_t));
 
     crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id,
               lh_rsc?lh_rsc->id:"NA", lh_action_task?lh_action_task:"NA", lh_action?lh_action->uuid:"NA",
               rh_rsc?rh_rsc->id:"NA", rh_action_task?rh_action_task:"NA", rh_action?rh_action->uuid:"NA");
 
     /* CRM_ASSERT(data_set->order_id != 291); */
 
     order->id = data_set->order_id++;
     order->type = type;
     order->lh_rsc = lh_rsc;
     order->rh_rsc = rh_rsc;
     order->lh_action = lh_action;
     order->rh_action = rh_action;
     order->lh_action_task = lh_action_task;
     order->rh_action_task = rh_action_task;
 
     if (order->lh_action_task == NULL && lh_action) {
         order->lh_action_task = strdup(lh_action->uuid);
     }
 
     if (order->rh_action_task == NULL && rh_action) {
         order->rh_action_task = strdup(rh_action->uuid);
     }
 
     if (order->lh_rsc == NULL && lh_action) {
         order->lh_rsc = lh_action->rsc;
     }
 
     if (order->rh_rsc == NULL && rh_action) {
         order->rh_rsc = rh_action->rsc;
     }
 
     data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints, order);
     handle_migration_ordering(order, data_set);
 
     return order->id;
 }
 
 enum pe_ordering
 get_asymmetrical_flags(enum pe_order_kind kind)
 {
     enum pe_ordering flags = pe_order_optional;
 
     if (kind == pe_order_kind_mandatory) {
         pe__set_order_flags(flags, pe_order_asymmetrical);
     } else if (kind == pe_order_kind_serialize) {
         pe__set_order_flags(flags, pe_order_serialize_only);
     }
     return flags;
 }
 
 enum pe_ordering
 get_flags(const char *id, enum pe_order_kind kind,
           const char *action_first, const char *action_then, gboolean invert)
 {
     enum pe_ordering flags = pe_order_optional;
 
     if (invert && kind == pe_order_kind_mandatory) {
         crm_trace("Upgrade %s: implies left", id);
         pe__set_order_flags(flags, pe_order_implies_first);
 
     } else if (kind == pe_order_kind_mandatory) {
         crm_trace("Upgrade %s: implies right", id);
         pe__set_order_flags(flags, pe_order_implies_then);
         if (pcmk__strcase_any_of(action_first, RSC_START, RSC_PROMOTE, NULL)) {
             crm_trace("Upgrade %s: runnable", id);
             pe__set_order_flags(flags, pe_order_runnable_left);
         }
 
     } else if (kind == pe_order_kind_serialize) {
         pe__set_order_flags(flags, pe_order_serialize_only);
     }
 
     return flags;
 }
 
 static gboolean
 unpack_order_set(xmlNode * set, enum pe_order_kind parent_kind, pe_resource_t ** rsc,
                  pe_action_t ** begin, pe_action_t ** end, pe_action_t ** inv_begin,
                  pe_action_t ** inv_end, const char *parent_symmetrical_s,
                  pe_working_set_t * data_set)
 {
     xmlNode *xml_rsc = NULL;
     GListPtr set_iter = NULL;
     GListPtr resources = NULL;
 
     pe_resource_t *last = NULL;
     pe_resource_t *resource = NULL;
 
     int local_kind = parent_kind;
     gboolean sequential = FALSE;
     enum pe_ordering flags = pe_order_optional;
     gboolean symmetrical = TRUE;
 
     char *key = NULL;
     const char *id = ID(set);
     const char *action = crm_element_value(set, "action");
     const char *sequential_s = crm_element_value(set, "sequential");
     const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND);
 
     /*
        char *pseudo_id = NULL;
        char *end_id    = NULL;
        char *begin_id  = NULL;
      */
 
     if (action == NULL) {
         action = RSC_START;
     }
 
     if (kind_s) {
         local_kind = get_ordering_type(set);
     }
     if (sequential_s == NULL) {
         sequential_s = "1";
     }
 
     sequential = crm_is_true(sequential_s);
 
     symmetrical = order_is_symmetrical(set, parent_kind, parent_symmetrical_s);
     if (symmetrical) {
         flags = get_flags(id, local_kind, action, action, FALSE);
     } else {
         flags = get_asymmetrical_flags(local_kind);
     }
 
     for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
          xml_rsc = pcmk__xe_next(xml_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
             EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc));
             resources = g_list_append(resources, resource);
         }
     }
 
     if (pcmk__list_of_1(resources)) {
         crm_trace("Single set: %s", id);
         *rsc = resource;
         *end = NULL;
         *begin = NULL;
         *inv_end = NULL;
         *inv_begin = NULL;
         goto done;
     }
 
     /*
        pseudo_id = crm_strdup_printf("%s-%s", id, action);
        end_id    = crm_strdup_printf("%s-%s", pseudo_id, "end");
        begin_id  = crm_strdup_printf("%s-%s", pseudo_id, "begin");
      */
 
     *rsc = NULL;
     /*
      *end = get_pseudo_op(end_id, data_set);
      *begin = get_pseudo_op(begin_id, data_set);
 
      free(pseudo_id);
      free(begin_id);
      free(end_id);
      */
 
     set_iter = resources;
     while (set_iter != NULL) {
         resource = (pe_resource_t *) set_iter->data;
         set_iter = set_iter->next;
 
         key = pcmk__op_key(resource->id, action, 0);
 
         /*
            custom_action_order(NULL, NULL, *begin, resource, strdup(key), NULL,
            flags|pe_order_implies_first_printed, data_set);
 
            custom_action_order(resource, strdup(key), NULL, NULL, NULL, *end,
            flags|pe_order_implies_then_printed, data_set);
          */
 
         if (local_kind == pe_order_kind_serialize) {
             /* Serialize before everything that comes after */
 
             GListPtr gIter = NULL;
 
             for (gIter = set_iter; gIter != NULL; gIter = gIter->next) {
                 pe_resource_t *then_rsc = (pe_resource_t *) gIter->data;
                 char *then_key = pcmk__op_key(then_rsc->id, action, 0);
 
                 custom_action_order(resource, strdup(key), NULL, then_rsc, then_key, NULL,
                                     flags, data_set);
             }
 
         } else if (sequential) {
             if (last != NULL) {
                 new_rsc_order(last, action, resource, action, flags, data_set);
             }
             last = resource;
         }
         free(key);
     }
 
     if (symmetrical == FALSE) {
         goto done;
     }
 
     last = NULL;
     action = invert_action(action);
 
     /*
        pseudo_id = crm_strdup_printf("%s-%s", id, action);
        end_id    = crm_strdup_printf("%s-%s", pseudo_id, "end");
        begin_id  = crm_strdup_printf("%s-%s", pseudo_id, "begin");
 
        *inv_end = get_pseudo_op(end_id, data_set);
        *inv_begin = get_pseudo_op(begin_id, data_set);
 
        free(pseudo_id);
        free(begin_id);
        free(end_id);
      */
 
     flags = get_flags(id, local_kind, action, action, TRUE);
 
     set_iter = resources;
     while (set_iter != NULL) {
         resource = (pe_resource_t *) set_iter->data;
         set_iter = set_iter->next;
 
         /*
            key = pcmk__op_key(resource->id, action, 0);
 
            custom_action_order(NULL, NULL, *inv_begin, resource, strdup(key), NULL,
            flags|pe_order_implies_first_printed, data_set);
 
            custom_action_order(resource, key, NULL, NULL, NULL, *inv_end,
            flags|pe_order_implies_then_printed, data_set);
          */
 
         if (sequential) {
             if (last != NULL) {
                 new_rsc_order(resource, action, last, action, flags, data_set);
             }
             last = resource;
         }
     }
 
   done:
     g_list_free(resources);
     return TRUE;
 }
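
/* A sketch of a set-based ordering unpacked above (IDs hypothetical).
 * sequential="true" (the default) orders the members A -> B -> C on start
 * and, because the constraint is symmetrical, C -> B -> A on stop:
 *
 *   <rsc_order id="order-set" kind="Mandatory">
 *     <resource_set id="order-set-0" sequential="true">
 *       <resource_ref id="A"/>
 *       <resource_ref id="B"/>
 *       <resource_ref id="C"/>
 *     </resource_set>
 *   </rsc_order>
 */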
 
 static gboolean
 order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kind kind,
                pe_working_set_t * data_set, gboolean invert, gboolean symmetrical)
 {
 
     xmlNode *xml_rsc = NULL;
     xmlNode *xml_rsc_2 = NULL;
 
     pe_resource_t *rsc_1 = NULL;
     pe_resource_t *rsc_2 = NULL;
 
     const char *action_1 = crm_element_value(set1, "action");
     const char *action_2 = crm_element_value(set2, "action");
 
     const char *sequential_1 = crm_element_value(set1, "sequential");
     const char *sequential_2 = crm_element_value(set2, "sequential");
 
     const char *require_all_s = crm_element_value(set1, "require-all");
     gboolean require_all = require_all_s ? crm_is_true(require_all_s) : TRUE;
 
     enum pe_ordering flags = pe_order_none;
 
    if (action_1 == NULL) {
        action_1 = RSC_START;
    }

    if (action_2 == NULL) {
        action_2 = RSC_START;
    }
 
     if (invert) {
         action_1 = invert_action(action_1);
         action_2 = invert_action(action_2);
     }
 
     if(pcmk__str_eq(RSC_STOP, action_1, pcmk__str_casei) || pcmk__str_eq(RSC_DEMOTE, action_1, pcmk__str_casei)) {
        /* Assuming: A -> ( B || C) -> D
         * The one-or-more logic only applies during the start/promote phase.
         * During shutdown, neither B nor C can shut down until D is down, so
         * simply turn require_all back on.
         */
         require_all = TRUE;
     }
 
     if (symmetrical == FALSE) {
         flags = get_asymmetrical_flags(kind);
     } else {
         flags = get_flags(id, kind, action_2, action_1, invert);
     }
 
    /* If we have an unordered set1, whether it is sequential or not is irrelevant with regard to set2. */
     if (!require_all) {
         char *task = crm_strdup_printf(CRM_OP_RELAXED_SET ":%s", ID(set1));
         pe_action_t *unordered_action = get_pseudo_op(task, data_set);
 
         free(task);
         update_action_flags(unordered_action, pe_action_requires_any,
                             __func__, __LINE__);
 
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (!pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 continue;
             }
 
             EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
 
             /* Add an ordering constraint between every element in set1 and the pseudo action.
              * If any action in set1 is runnable the pseudo action will be runnable. */
             custom_action_order(rsc_1, pcmk__op_key(rsc_1->id, action_1, 0),
                                 NULL, NULL, NULL, unordered_action,
                                 pe_order_one_or_more|pe_order_implies_then_printed,
                                 data_set);
         }
         for (xml_rsc_2 = pcmk__xe_first_child(set2); xml_rsc_2 != NULL;
              xml_rsc_2 = pcmk__xe_next(xml_rsc_2)) {
 
             if (!pcmk__str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 continue;
             }
 
             EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
 
             /* Add an ordering constraint between the pseudo action and every element in set2.
              * If the pseudo action is runnable, every action in set2 will be runnable */
             custom_action_order(NULL, NULL, unordered_action,
                                 rsc_2, pcmk__op_key(rsc_2->id, action_2, 0),
                                 NULL, flags|pe_order_runnable_left, data_set);
         }
 
         return TRUE;
     }
 
     if (crm_is_true(sequential_1)) {
         if (invert == FALSE) {
             /* get the last one */
             const char *rid = NULL;
 
             for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
                  xml_rsc = pcmk__xe_next(xml_rsc)) {
 
                 if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                     rid = ID(xml_rsc);
                 }
             }
             EXPAND_CONSTRAINT_IDREF(id, rsc_1, rid);
 
         } else {
             /* get the first one */
             for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
                  xml_rsc = pcmk__xe_next(xml_rsc)) {
 
                 if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                     EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
                     break;
                 }
             }
         }
     }
 
     if (crm_is_true(sequential_2)) {
         if (invert == FALSE) {
             /* get the first one */
             for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL;
                  xml_rsc = pcmk__xe_next(xml_rsc)) {
 
                 if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                     EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
                     break;
                 }
             }
 
         } else {
             /* get the last one */
             const char *rid = NULL;
 
             for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL;
                  xml_rsc = pcmk__xe_next(xml_rsc)) {
 
                 if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                     rid = ID(xml_rsc);
                 }
             }
             EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid);
         }
     }
 
     if (rsc_1 != NULL && rsc_2 != NULL) {
         new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
 
     } else if (rsc_1 != NULL) {
         for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
                 new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
             }
         }
 
     } else if (rsc_2 != NULL) {
         xmlNode *xml_rsc = NULL;
 
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
                 new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
             }
         }
 
     } else {
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 xmlNode *xml_rsc_2 = NULL;
 
                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
 
                 for (xml_rsc_2 = pcmk__xe_first_child(set2);
                      xml_rsc_2 != NULL;
                      xml_rsc_2 = pcmk__xe_next(xml_rsc_2)) {
 
                     if (pcmk__str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                         EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
                         new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set);
                     }
                 }
             }
         }
     }
 
     return TRUE;
 }
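
/* Note: when no single endpoint resource was selected for a set above (e.g.
 * the set is not sequential), the fallback loops expand the ordering
 * pairwise, calling new_rsc_order() once per (set1 member, set2 member)
 * pair.
 */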
 
 static gboolean
 unpack_order_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *id_first = NULL;
     const char *id_then = NULL;
     const char *action_first = NULL;
     const char *action_then = NULL;
 
     pe_resource_t *rsc_first = NULL;
     pe_resource_t *rsc_then = NULL;
     pe_tag_t *tag_first = NULL;
     pe_tag_t *tag_then = NULL;
 
     xmlNode *new_xml = NULL;
     xmlNode *rsc_set_first = NULL;
     xmlNode *rsc_set_then = NULL;
     gboolean any_sets = FALSE;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     /* Attempt to expand any template/tag references in possible resource sets. */
     expand_tags_in_sets(xml_obj, &new_xml, data_set);
     if (new_xml) {
         /* There are resource sets referencing templates/tags. Return with the expanded XML. */
         crm_log_xml_trace(new_xml, "Expanded rsc_order...");
         *expanded_xml = new_xml;
         return TRUE;
     }
 
     id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST);
     id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN);
     if (id_first == NULL || id_then == NULL) {
         return TRUE;
     }
 
     if (valid_resource_or_tag(data_set, id_first, &rsc_first, &tag_first) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_first);
         return FALSE;
     }
 
     if (valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_then);
         return FALSE;
     }
 
     if (rsc_first && rsc_then) {
         /* Neither side references any template/tag. */
         return TRUE;
     }
 
     action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
     action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
 
     new_xml = copy_xml(xml_obj);
 
     /* Convert the template/tag reference in "first" into a resource_set under the order constraint. */
     if (tag_to_set(new_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST, TRUE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_first) {
         if (action_first) {
             /* A "first-action" is specified.
                Move it into the converted resource_set as an "action" attribute. */
             crm_xml_add(rsc_set_first, "action", action_first);
             xml_remove_prop(new_xml, XML_ORDER_ATTR_FIRST_ACTION);
         }
         any_sets = TRUE;
     }
 
     /* Convert the template/tag reference in "then" into a resource_set under the order constraint. */
     if (tag_to_set(new_xml, &rsc_set_then, XML_ORDER_ATTR_THEN, TRUE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_then) {
         if (action_then) {
             /* A "then-action" is specified.
                Move it into the converted resource_set as an "action" attribute. */
             crm_xml_add(rsc_set_then, "action", action_then);
             xml_remove_prop(new_xml, XML_ORDER_ATTR_THEN_ACTION);
         }
         any_sets = TRUE;
     }
 
     if (any_sets) {
         crm_log_xml_trace(new_xml, "Expanded rsc_order...");
         *expanded_xml = new_xml;
     } else {
         free_xml(new_xml);
     }
 
     return TRUE;
 }
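
/* Illustrative sketch (hypothetical IDs): given
 *
 *   <tag id="all-dbs"><obj_ref id="db1"/><obj_ref id="db2"/></tag>
 *
 * an ordering such as <rsc_order id="o1" first="all-dbs" then="web"
 * first-action="start"/> is expanded by unpack_order_tags() into an
 * equivalent constraint whose "first" side is a resource_set listing db1
 * and db2, with the first-action moved onto the set as its "action"
 * attribute.
 */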
 
 gboolean
 unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     gboolean any_sets = FALSE;
 
     pe_resource_t *rsc = NULL;
 
     /*
        pe_resource_t *last_rsc = NULL;
      */
 
     pe_action_t *set_end = NULL;
     pe_action_t *set_begin = NULL;
 
     pe_action_t *set_inv_end = NULL;
     pe_action_t *set_inv_begin = NULL;
 
     xmlNode *set = NULL;
     xmlNode *last = NULL;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     /*
        pe_action_t *last_end = NULL;
        pe_action_t *last_begin = NULL;
        pe_action_t *last_inv_end = NULL;
        pe_action_t *last_inv_begin = NULL;
      */
 
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL);
     enum pe_order_kind kind = get_ordering_type(xml_obj);
 
     gboolean invert_bool = order_is_symmetrical(xml_obj, kind, NULL);
     gboolean rc = TRUE;
 
     rc = unpack_order_tags(xml_obj, &expanded_xml, data_set);
     if (expanded_xml) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
 
     } else if (rc == FALSE) {
         return FALSE;
     }
 
     for (set = pcmk__xe_first_child(xml_obj); set != NULL;
          set = pcmk__xe_next(set)) {
 
         if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) {
             any_sets = TRUE;
             set = expand_idref(set, data_set->input);
             if (unpack_order_set(set, kind, &rsc, &set_begin, &set_end,
                                  &set_inv_begin, &set_inv_end, invert, data_set) == FALSE) {
                 return FALSE;
 
                 /* Expand orders in order_rsc_sets() instead of via pseudo actions. */
                 /*
                    } else if(last) {
                    const char *set_action = crm_element_value(set, "action");
                    const char *last_action = crm_element_value(last, "action");
                    enum pe_ordering flags = get_flags(id, kind, last_action, set_action, FALSE);
 
                    if(!set_action) { set_action = RSC_START; }
                    if(!last_action) { last_action = RSC_START; }
 
                    if(rsc == NULL && last_rsc == NULL) {
                    order_actions(last_end, set_begin, flags);
                    } else {
                    custom_action_order(
                    last_rsc, null_or_opkey(last_rsc, last_action), last_end,
                    rsc, null_or_opkey(rsc, set_action), set_begin,
                    flags, data_set);
                    }
 
                    if(crm_is_true(invert)) {
                    set_action = invert_action(set_action);
                    last_action = invert_action(last_action);
 
                    flags = get_flags(id, kind, last_action, set_action, TRUE);
                    if(rsc == NULL && last_rsc == NULL) {
                    order_actions(last_inv_begin, set_inv_end, flags);
 
                    } else {
                    custom_action_order(
                    last_rsc, null_or_opkey(last_rsc, last_action), last_inv_begin,
                    rsc, null_or_opkey(rsc, set_action), set_inv_end,
                    flags, data_set);
                    }
                    }
                  */
 
            } else if (last) {
                /* Formerly never reached -- now called to support clones in
                 * resource sets */
                 if (order_rsc_sets(id, last, set, kind, data_set, FALSE, invert_bool) == FALSE) {
                     return FALSE;
                 }
 
                 if (invert_bool
                     && order_rsc_sets(id, set, last, kind, data_set, TRUE, invert_bool) == FALSE) {
                     return FALSE;
                 }
 
             }
             last = set;
             /*
                last_rsc = rsc;
                last_end = set_end;
                last_begin = set_begin;
                last_inv_end = set_inv_end;
                last_inv_begin = set_inv_begin;
              */
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (any_sets == FALSE) {
         return unpack_simple_rsc_order(xml_obj, data_set);
     }
 
     return TRUE;
 }
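
/* Example (hypothetical IDs): with two sequential sets
 *
 *   <rsc_order id="o2" kind="Mandatory">
 *     <resource_set id="s1">
 *       <resource_ref id="A"/><resource_ref id="B"/>
 *     </resource_set>
 *     <resource_set id="s2"><resource_ref id="C"/></resource_set>
 *   </rsc_order>
 *
 * unpack_order_set() orders A before B within s1, and order_rsc_sets()
 * orders s1 before s2 (and, when the constraint is symmetrical, adds the
 * inverted ordering for the stop actions as well).
 */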
 
 static gboolean
 unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *with = NULL;
     pe_resource_t *resource = NULL;
     const char *set_id = ID(set);
     const char *role = crm_element_value(set, "role");
     const char *sequential = crm_element_value(set, "sequential");
     const char *ordering = crm_element_value(set, "ordering");
     int local_score = score;
 
     const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE);
 
     if (score_s) {
         local_score = char2score(score_s);
     }
 
     if(ordering == NULL) {
         ordering = "group";
     }
 
     if (sequential != NULL && crm_is_true(sequential) == FALSE) {
         return TRUE;
 
     } else if ((local_score >= 0)
                && pcmk__str_eq(ordering, "group", pcmk__str_casei)) {
         for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
                 if (with != NULL) {
                     pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id);
                     pcmk__new_colocation(set_id, NULL, local_score, resource,
                                          with, role, role, data_set);
                 }
 
                 with = resource;
             }
         }
     } else if (local_score >= 0) {
         pe_resource_t *last = NULL;
         for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
                 if (last != NULL) {
                     pe_rsc_trace(resource, "Colocating %s with %s", last->id, resource->id);
                     pcmk__new_colocation(set_id, NULL, local_score, last,
                                          resource, role, role, data_set);
                 }
 
                 last = resource;
             }
         }
 
     } else {
         /* Anti-colocating with every prior resource is
          * the only way to ensure the intuitive result
          * (i.e. that no one in the set can run with anyone else in the set)
          */
 
         for (xml_rsc = pcmk__xe_first_child(set); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 xmlNode *xml_rsc_with = NULL;
 
                 EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
 
                 for (xml_rsc_with = pcmk__xe_first_child(set);
                      xml_rsc_with != NULL;
                      xml_rsc_with = pcmk__xe_next(xml_rsc_with)) {
 
                     if (pcmk__str_eq((const char *)xml_rsc_with->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                         if (pcmk__str_eq(resource->id, ID(xml_rsc_with), pcmk__str_casei)) {
                             break;
                         }
                         EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with));
                         pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id,
                                      with->id);
                         pcmk__new_colocation(set_id, NULL, local_score,
                                              resource, with, role, role,
                                              data_set);
                     }
                 }
             }
         }
     }
 
     return TRUE;
 }
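
/* Example (hypothetical IDs): for a sequential set with members A, B and C
 * and a positive score, the "group" ordering branch above colocates B with
 * A and C with B; with a negative score, each member is instead
 * anti-colocated with every earlier member (B with A, C with A, and C with
 * B), so that no two members of the set can run together.
 */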
 
 static gboolean
 colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score,
                   pe_working_set_t * data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *rsc_1 = NULL;
     pe_resource_t *rsc_2 = NULL;
 
     const char *role_1 = crm_element_value(set1, "role");
     const char *role_2 = crm_element_value(set2, "role");
 
     const char *sequential_1 = crm_element_value(set1, "sequential");
     const char *sequential_2 = crm_element_value(set2, "sequential");
 
     if (sequential_1 == NULL || crm_is_true(sequential_1)) {
         /* get the first one */
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
                 break;
             }
         }
     }
 
     if (sequential_2 == NULL || crm_is_true(sequential_2)) {
         /* get the last one */
         const char *rid = NULL;
 
         for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 rid = ID(xml_rsc);
             }
         }
         EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid);
     }
 
     if (rsc_1 != NULL && rsc_2 != NULL) {
         pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
                              data_set);
 
     } else if (rsc_1 != NULL) {
         for (xml_rsc = pcmk__xe_first_child(set2); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
                 pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
                                      role_2, data_set);
             }
         }
 
     } else if (rsc_2 != NULL) {
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
                 pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
                                      role_2, data_set);
             }
         }
 
     } else {
         for (xml_rsc = pcmk__xe_first_child(set1); xml_rsc != NULL;
              xml_rsc = pcmk__xe_next(xml_rsc)) {
 
             if (pcmk__str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                 xmlNode *xml_rsc_2 = NULL;
 
                 EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
 
                 for (xml_rsc_2 = pcmk__xe_first_child(set2);
                      xml_rsc_2 != NULL;
                      xml_rsc_2 = pcmk__xe_next(xml_rsc_2)) {
 
                     if (pcmk__str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, pcmk__str_none)) {
                         EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
                         pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
                                              role_1, role_2, data_set);
                     }
                 }
             }
         }
     }
 
     return TRUE;
 }
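
/* Example (hypothetical IDs): for two sequential sets s1 = { A, B } and
 * s2 = { C, D }, the first member of s1 (A) is colocated with the last
 * member of s2 (D); if either set is non-sequential, every member of that
 * set is colocated individually via the fallback loops above.
 */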
 
 static void
 unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     int score_i = 0;
 
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
 
     const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     const char *id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
     const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
     const char *state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE);
     const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR);
     const char *symmetrical = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL);
 
     // experimental syntax from pacemaker-next (unlikely to be adopted as-is)
     const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE);
     const char *instance_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_INSTANCE);
 
     pe_resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh);
     pe_resource_t *rsc_rh = pe_find_constraint_resource(data_set->resources, id_rh);
 
     if (rsc_lh == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, id_lh);
         return;
 
     } else if (rsc_rh == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, id_rh);
         return;
 
     } else if (instance_lh && pe_rsc_is_clone(rsc_lh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, id_lh, instance_lh);
         return;
 
     } else if (instance_rh && pe_rsc_is_clone(rsc_rh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, id_rh, instance_rh);
         return;
     }
 
     if (instance_lh) {
         rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set);
         if (rsc_lh == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               id, id_lh, instance_lh);
             return;
         }
     }
 
     if (instance_rh) {
         rsc_rh = find_clone_instance(rsc_rh, instance_rh, data_set);
         if (rsc_rh == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               "'%s'", id, id_rh, instance_rh);
             return;
         }
     }
 
     if (crm_is_true(symmetrical)) {
         pcmk__config_warn("The colocation constraint '"
                           XML_CONS_ATTR_SYMMETRICAL
                           "' attribute has been removed");
     }
 
     if (score) {
         score_i = char2score(score);
     }
 
     pcmk__new_colocation(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh,
                          data_set);
 }
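
/* Example (hypothetical IDs): a simple colocation without resource sets,
 * such as
 *
 *   <rsc_colocation id="c1" rsc="web" with-rsc="db" score="INFINITY"/>
 *
 * is handled here directly: both resources are looked up and a single
 * pcmk__new_colocation() entry keeps 'web' where 'db' is running.
 */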
 
 static gboolean
 unpack_colocation_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *id_lh = NULL;
     const char *id_rh = NULL;
     const char *state_lh = NULL;
     const char *state_rh = NULL;
 
     pe_resource_t *rsc_lh = NULL;
     pe_resource_t *rsc_rh = NULL;
 
     pe_tag_t *tag_lh = NULL;
     pe_tag_t *tag_rh = NULL;
 
     xmlNode *new_xml = NULL;
     xmlNode *rsc_set_lh = NULL;
     xmlNode *rsc_set_rh = NULL;
     gboolean any_sets = FALSE;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     /* Attempt to expand any template/tag references in possible resource sets. */
     expand_tags_in_sets(xml_obj, &new_xml, data_set);
     if (new_xml) {
         /* There are resource sets referencing templates/tags. Return with the expanded XML. */
         crm_log_xml_trace(new_xml, "Expanded rsc_colocation...");
         *expanded_xml = new_xml;
         return TRUE;
     }
 
     id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
     if (id_lh == NULL || id_rh == NULL) {
         return TRUE;
     }
 
     if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_lh);
         return FALSE;
     }
 
     if (valid_resource_or_tag(data_set, id_rh, &rsc_rh, &tag_rh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_rh);
         return FALSE;
     }
 
     if (rsc_lh && rsc_rh) {
         /* Neither side references any template/tag. */
         return TRUE;
     }
 
     if (tag_lh && tag_rh) {
         /* A colocation constraint between two templates/tags makes no sense. */
         pcmk__config_err("Ignoring constraint '%s' because two templates or "
                          "tags cannot be colocated", id);
         return FALSE;
     }
 
     state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
     state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE);
 
     new_xml = copy_xml(xml_obj);
 
     /* Convert the template/tag reference in "rsc" into a resource_set under the colocation constraint. */
     if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, TRUE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_lh) {
         if (state_lh) {
             /* A "rsc-role" is specified.
               Move it into the converted resource_set as a "role" attribute. */
             crm_xml_add(rsc_set_lh, "role", state_lh);
             xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE);
         }
         any_sets = TRUE;
     }
 
     /* Convert the template/tag reference in "with-rsc" into a resource_set under the colocation constraint. */
     if (tag_to_set(new_xml, &rsc_set_rh, XML_COLOC_ATTR_TARGET, TRUE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_rh) {
         if (state_rh) {
             /* A "with-rsc-role" is specified.
               Move it into the converted resource_set as a "role" attribute. */
             crm_xml_add(rsc_set_rh, "role", state_rh);
             xml_remove_prop(new_xml, XML_COLOC_ATTR_TARGET_ROLE);
         }
         any_sets = TRUE;
     }
 
     if (any_sets) {
         crm_log_xml_trace(new_xml, "Expanded rsc_colocation...");
         *expanded_xml = new_xml;
     } else {
         free_xml(new_xml);
     }
 
     return TRUE;
 }
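
/* As with ordering constraints, a colocation whose "rsc" or "with-rsc"
 * references a template or tag is rewritten above as an equivalent
 * resource_set, with any rsc-role/with-rsc-role moved onto the set as its
 * "role" attribute.
 */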
 
 static void
 unpack_rsc_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     int score_i = 0;
     xmlNode *set = NULL;
     xmlNode *last = NULL;
     gboolean any_sets = FALSE;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
 
     if (score) {
         score_i = char2score(score);
     }
 
     if (!unpack_colocation_tags(xml_obj, &expanded_xml, data_set)) {
         return;
     }
     if (expanded_xml) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
     }
 
     for (set = pcmk__xe_first_child(xml_obj); set != NULL;
          set = pcmk__xe_next(set)) {
 
         if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) {
             any_sets = TRUE;
             set = expand_idref(set, data_set->input);
             if (!unpack_colocation_set(set, score_i, data_set)) {
                 return;
             }
             if (last && !colocate_rsc_sets(id, last, set, score_i, data_set)) {
                 return;
             }
             last = set;
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (!any_sets) {
         unpack_simple_colocation(xml_obj, data_set);
     }
 }
 
 gboolean
 rsc_ticket_new(const char *id, pe_resource_t * rsc_lh, pe_ticket_t * ticket,
                const char *state_lh, const char *loss_policy, pe_working_set_t * data_set)
 {
     rsc_ticket_t *new_rsc_ticket = NULL;
 
     if (rsc_lh == NULL) {
         pcmk__config_err("Ignoring ticket '%s' because resource "
                          "does not exist", id);
         return FALSE;
     }
 
     new_rsc_ticket = calloc(1, sizeof(rsc_ticket_t));
     if (new_rsc_ticket == NULL) {
         return FALSE;
     }
 
     if (pcmk__str_eq(state_lh, RSC_ROLE_STARTED_S, pcmk__str_null_matches | pcmk__str_casei)) {
         state_lh = RSC_ROLE_UNKNOWN_S;
     }
 
     new_rsc_ticket->id = id;
     new_rsc_ticket->ticket = ticket;
     new_rsc_ticket->rsc_lh = rsc_lh;
     new_rsc_ticket->role_lh = text2role(state_lh);
 
     if (pcmk__str_eq(loss_policy, "fence", pcmk__str_casei)) {
         if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             new_rsc_ticket->loss_policy = loss_ticket_fence;
         } else {
             pcmk__config_err("Resetting '" XML_TICKET_ATTR_LOSS_POLICY
                              "' for ticket '%s' to 'stop' "
                              "because fencing is not configured", ticket->id);
             loss_policy = "stop";
         }
     }
 
     if (new_rsc_ticket->loss_policy == loss_ticket_fence) {
         crm_debug("On loss of ticket '%s': Fence the nodes running %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                   role2text(new_rsc_ticket->role_lh));
 
     } else if (pcmk__str_eq(loss_policy, "freeze", pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Freeze %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                   role2text(new_rsc_ticket->role_lh));
         new_rsc_ticket->loss_policy = loss_ticket_freeze;
 
     } else if (pcmk__str_eq(loss_policy, "demote", pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Demote %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                   role2text(new_rsc_ticket->role_lh));
         new_rsc_ticket->loss_policy = loss_ticket_demote;
 
     } else if (pcmk__str_eq(loss_policy, "stop", pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Stop %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                   role2text(new_rsc_ticket->role_lh));
         new_rsc_ticket->loss_policy = loss_ticket_stop;
 
     } else {
         if (new_rsc_ticket->role_lh == RSC_ROLE_MASTER) {
             crm_debug("On loss of ticket '%s': Default to demote %s (%s)",
                       new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                       role2text(new_rsc_ticket->role_lh));
             new_rsc_ticket->loss_policy = loss_ticket_demote;
 
         } else {
             crm_debug("On loss of ticket '%s': Default to stop %s (%s)",
                       new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id,
                       role2text(new_rsc_ticket->role_lh));
             new_rsc_ticket->loss_policy = loss_ticket_stop;
         }
     }
 
     pe_rsc_trace(rsc_lh, "%s (%s) ==> %s", rsc_lh->id, role2text(new_rsc_ticket->role_lh),
                  ticket->id);
 
     rsc_lh->rsc_tickets = g_list_append(rsc_lh->rsc_tickets, new_rsc_ticket);
 
     data_set->ticket_constraints = g_list_append(data_set->ticket_constraints, new_rsc_ticket);
 
     if (new_rsc_ticket->ticket->granted == FALSE || new_rsc_ticket->ticket->standby) {
         rsc_ticket_constraint(rsc_lh, new_rsc_ticket, data_set);
     }
 
     return TRUE;
 }
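
/* Example (hypothetical IDs): a constraint such as
 *
 *   <rsc_ticket id="t1" rsc="db" ticket="T" loss-policy="fence"/>
 *
 * fences the nodes running 'db' if ticket T is revoked. Without an
 * explicit loss-policy, promoted (master) resources default to "demote"
 * and all others default to "stop", as implemented above.
 */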
 
 static gboolean
 unpack_rsc_ticket_set(xmlNode * set, pe_ticket_t * ticket, const char *loss_policy,
                       pe_working_set_t * data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *resource = NULL;
     const char *set_id = NULL;
     const char *role = NULL;
 
     CRM_CHECK(set != NULL, return FALSE);
     CRM_CHECK(ticket != NULL, return FALSE);
 
     set_id = ID(set);
     if (set_id == NULL) {
         pcmk__config_err("Ignoring <" XML_CONS_TAG_RSC_SET "> without "
                          XML_ATTR_ID);
         return FALSE;
     }
 
     role = crm_element_value(set, "role");
 
     for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
          xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
         EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
         pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'",
                      resource->id, ticket->id);
         rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set);
     }
 
     return TRUE;
 }
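
/* Example (hypothetical IDs): the set form
 *
 *   <rsc_ticket id="t2" ticket="T" loss-policy="stop">
 *     <resource_set id="ts1" role="Started">
 *       <resource_ref id="A"/><resource_ref id="B"/>
 *     </resource_set>
 *   </rsc_ticket>
 *
 * results in one rsc_ticket_new() dependency per referenced resource.
 */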
 
 static gboolean
 unpack_simple_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
     const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
 
     pe_ticket_t *ticket = NULL;
 
     const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
 
     // experimental syntax from pacemaker-next (unlikely to be adopted as-is)
     const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE);
 
     pe_resource_t *rsc_lh = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (ticket_str == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without ticket specified",
                          id);
         return FALSE;
     } else {
         ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
     }
 
     if (ticket == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because ticket '%s' "
                          "does not exist", id, ticket_str);
         return FALSE;
     }
 
     if (id_lh == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without resource", id);
         return FALSE;
     } else {
         rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh);
     }
 
     if (rsc_lh == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, id_lh);
         return FALSE;
 
     } else if (instance_lh && pe_rsc_is_clone(rsc_lh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, id_lh, instance_lh);
         return FALSE;
     }
 
     if (instance_lh) {
         rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set);
         if (rsc_lh == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               "'%s'", id, id_lh, instance_lh);
             return FALSE;
         }
     }
 
     rsc_ticket_new(id, rsc_lh, ticket, state_lh, loss_policy, data_set);
     return TRUE;
 }
 
 static gboolean
 unpack_rsc_ticket_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *id_lh = NULL;
     const char *state_lh = NULL;
 
     pe_resource_t *rsc_lh = NULL;
     pe_tag_t *tag_lh = NULL;
 
     xmlNode *new_xml = NULL;
     xmlNode *rsc_set_lh = NULL;
     gboolean any_sets = FALSE;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     /* Attempt to expand any template/tag references in possible resource sets. */
     expand_tags_in_sets(xml_obj, &new_xml, data_set);
     if (new_xml) {
         /* There are resource sets referencing templates/tags. Return with the expanded XML. */
         crm_log_xml_trace(new_xml, "Expanded rsc_ticket...");
         *expanded_xml = new_xml;
         return TRUE;
     }
 
     id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     if (id_lh == NULL) {
         return TRUE;
     }
 
     if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, id_lh);
         return FALSE;
 
     } else if (rsc_lh) {
         /* No template/tag is referenced. */
         return TRUE;
     }
 
     state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
 
     new_xml = copy_xml(xml_obj);
 
     /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_ticket constraint. */
     if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, FALSE, data_set) == FALSE) {
         free_xml(new_xml);
         return FALSE;
     }
 
     if (rsc_set_lh) {
         if (state_lh) {
             /* A "rsc-role" is specified.
               Move it into the converted resource_set as a "role" attribute. */
             crm_xml_add(rsc_set_lh, "role", state_lh);
             xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE);
         }
         any_sets = TRUE;
     }
 
     if (any_sets) {
         crm_log_xml_trace(new_xml, "Expanded rsc_ticket...");
         *expanded_xml = new_xml;
     } else {
         free_xml(new_xml);
     }
 
     return TRUE;
 }
 
 gboolean
 unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     xmlNode *set = NULL;
     gboolean any_sets = FALSE;
 
     const char *id = NULL;
     const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
     const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
 
     pe_ticket_t *ticket = NULL;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     gboolean rc = TRUE;
 
     CRM_CHECK(xml_obj != NULL, return FALSE);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (data_set->tickets == NULL) {
         data_set->tickets =
             g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket);
     }
 
     if (ticket_str == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without ticket", id);
         return FALSE;
     } else {
         ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
     }
 
     if (ticket == NULL) {
         ticket = ticket_new(ticket_str, data_set);
         if (ticket == NULL) {
             return FALSE;
         }
     }
 
     rc = unpack_rsc_ticket_tags(xml_obj, &expanded_xml, data_set);
     if (expanded_xml) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
 
     } else if (rc == FALSE) {
         return FALSE;
     }
 
     for (set = pcmk__xe_first_child(xml_obj); set != NULL;
          set = pcmk__xe_next(set)) {
 
         if (pcmk__str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, pcmk__str_none)) {
             any_sets = TRUE;
             set = expand_idref(set, data_set->input);
             if (unpack_rsc_ticket_set(set, ticket, loss_policy, data_set) == FALSE) {
                 return FALSE;
             }
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (any_sets == FALSE) {
         return unpack_simple_rsc_ticket(xml_obj, data_set);
     }
 
     return TRUE;
 }
diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c
index c012d23ce6..f0d1f476fe 100644
--- a/lib/pacemaker/pcmk_sched_graph.c
+++ b/lib/pacemaker/pcmk_sched_graph.c
@@ -1,1859 +1,1879 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <pacemaker-internal.h>
 
 void update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set);
 gboolean rsc_update_action(pe_action_t * first, pe_action_t * then, enum pe_ordering type);
 
 static enum pe_action_flags
 get_action_flags(pe_action_t * action, pe_node_t * node)
 {
     enum pe_action_flags flags = action->flags;
 
     if (action->rsc) {
         flags = action->rsc->cmds->action_flags(action, NULL);
 
         if (pe_rsc_is_clone(action->rsc) && node) {
 
             /* We only care about activity on $node */
             enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node);
 
             /* Go to great lengths to ensure the correct value for pe_action_runnable...
              *
              * If we are a clone, then for _ordering_ constraints, it's only relevant
              * if we are runnable _anywhere_.
              *
              * This only applies to _runnable_ though, and only for ordering constraints.
              * If this function is ever used during colocation, then we'll need additional logic
              *
              * Not very satisfying, but it's logical and appears to work well.
              */
             if (!pcmk_is_set(clone_flags, pe_action_runnable)
                 && pcmk_is_set(flags, pe_action_runnable)) {
 
                 pe__set_raw_action_flags(clone_flags, action->rsc->id,
                                          pe_action_runnable);
             }
             flags = clone_flags;
         }
     }
     return flags;
 }
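
/* Example: if a clone instance cannot start on this node but some instance
 * can start elsewhere, the adjustment above keeps pe_action_runnable set,
 * so ordering constraints treat the clone as runnable cluster-wide.
 */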
 
 static char *
 convert_non_atomic_uuid(char *old_uuid, pe_resource_t * rsc, gboolean allow_notify,
                         gboolean free_original)
 {
     guint interval_ms = 0;
     char *uuid = NULL;
     char *rid = NULL;
     char *raw_task = NULL;
     int task = no_action;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Processing %s", old_uuid);
     if (old_uuid == NULL) {
         return NULL;
 
     } else if (strstr(old_uuid, "notify") != NULL) {
         goto done;              /* no conversion */
 
     } else if (rsc->variant < pe_group) {
         goto done;              /* no conversion */
     }
 
     CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval_ms));
     if (interval_ms > 0) {
         goto done;              /* no conversion */
     }
 
     task = text2task(raw_task);
     switch (task) {
         case stop_rsc:
         case start_rsc:
         case action_notify:
         case action_promote:
         case action_demote:
             break;
         case stopped_rsc:
         case started_rsc:
         case action_notified:
         case action_promoted:
         case action_demoted:
             task--;
             break;
         case monitor_rsc:
         case shutdown_crm:
         case stonith_node:
             task = no_action;
             break;
         default:
             crm_err("Unknown action: %s", raw_task);
             task = no_action;
             break;
     }
 
     if (task != no_action) {
         if (pcmk_is_set(rsc->flags, pe_rsc_notify) && allow_notify) {
             uuid = pcmk__notify_key(rid, "confirmed-post", task2text(task + 1));
 
         } else {
             uuid = pcmk__op_key(rid, task2text(task + 1), 0);
         }
         pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid);
     }
 
   done:
     if (uuid == NULL) {
         uuid = strdup(old_uuid);
     }
 
     if (free_original) {
         free(old_uuid);
     }
 
     free(raw_task);
     free(rid);
     return uuid;
 }
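
/* Example: for a group or clone, a zero-interval "rsc_start_0" key is
 * converted to the corresponding completion key (e.g. "rsc_running_0", or
 * the "confirmed-post" notification key when notifications are enabled),
 * so that orderings attach to completion of the whole resource rather than
 * to the atomic action.
 */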
 
 static pe_action_t *
 rsc_expand_action(pe_action_t * action)
 {
     gboolean notify = FALSE;
     pe_action_t *result = action;
     pe_resource_t *rsc = action->rsc;
 
     if (rsc == NULL) {
         return action;
     }
 
     if ((rsc->parent == NULL)
         || (pe_rsc_is_clone(rsc) && (rsc->parent->variant == pe_container))) {
         /* Only outermost resources have notification actions.
          * The exception is those in bundles.
          */
         notify = pcmk_is_set(rsc->flags, pe_rsc_notify);
     }
 
     if (rsc->variant >= pe_group) {
         /* Expand 'start' -> 'started' */
         char *uuid = NULL;
 
         uuid = convert_non_atomic_uuid(action->uuid, rsc, notify, FALSE);
         if (uuid) {
             pe_rsc_trace(rsc, "Converting %s to %s %d", action->uuid, uuid,
                          pcmk_is_set(rsc->flags, pe_rsc_notify));
             result = find_first_action(rsc->actions, uuid, NULL, NULL);
             if (result == NULL) {
                 crm_err("Couldn't expand %s to %s in %s", action->uuid, uuid, rsc->id);
                 result = action;
             }
             free(uuid);
         }
     }
     return result;
 }
 
 static enum pe_graph_flags
 graph_update_action(pe_action_t * first, pe_action_t * then, pe_node_t * node,
                     enum pe_action_flags first_flags, enum pe_action_flags then_flags,
                     pe_action_wrapper_t *order, pe_working_set_t *data_set)
 {
     enum pe_graph_flags changed = pe_graph_none;
     enum pe_ordering type = order->type;
     gboolean processed = FALSE;
 
     /* TODO: Do as many of these in parallel as possible */
 
     if (pcmk_is_set(type, pe_order_implies_then_on_node)) {
         /* Normally we want the _whole_ 'then' clone to
          * restart if 'first' is restarted, so then->node is
          * needed.
          *
          * However for unfencing, we want to limit this to
          * instances on the same node as 'first' (the
          * unfencing operation), so first->node is supplied.
          *
         * Swap the node; from then on we can treat it
          * like any other 'pe_order_implies_then'
          */
 
         pe__clear_order_flags(type, pe_order_implies_then_on_node);
         pe__set_order_flags(type, pe_order_implies_then);
         node = first->node;
     }
 
     pe__clear_raw_action_flags(first_flags, "first action update",
                                pe_action_pseudo);
 
     if (type & pe_order_implies_then) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags & pe_action_optional, pe_action_optional,
                 pe_order_implies_then, data_set);
 
         } else if (!pcmk_is_set(first_flags, pe_action_optional)) {
             if (update_action_flags(then, pe_action_optional | pe_action_clear, __func__, __LINE__)) {
                 pe__set_graph_flags(changed, first, pe_graph_updated_then);
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc);
         }
     }
 
     if ((type & pe_order_restart) && then->rsc) {
         enum pe_action_flags restart = (pe_action_optional | pe_action_runnable);
 
         processed = TRUE;
         changed |= then->rsc->cmds->update_actions(first, then, node,
                                                    first_flags, restart,
                                                    pe_order_restart, data_set);
         if (changed) {
             pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("restart: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_implies_first) {
         processed = TRUE;
         if (first->rsc) {
             changed |= first->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_optional, pe_order_implies_first,
                 data_set);
 
         } else if (!pcmk_is_set(first_flags, pe_action_optional)) {
             pe_rsc_trace(first->rsc, "first unrunnable: %s (%d) then %s (%d)",
                          first->uuid, pcmk_is_set(first_flags, pe_action_optional),
                          then->uuid, pcmk_is_set(then_flags, pe_action_optional));
             if (update_action_flags(first, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
                 pe__set_graph_flags(changed, first, pe_graph_updated_first);
             }
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("implies left: %s (%d) then %s (%d)",
                       first->uuid, pcmk_is_set(first_flags, pe_action_optional),
                       then->uuid, pcmk_is_set(then_flags, pe_action_optional));
         }
     }
 
     if (type & pe_order_implies_first_master) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags & pe_action_optional, pe_action_optional,
                 pe_order_implies_first_master, data_set);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc,
                          "implies left when right rsc is Master role: %s then %s: changed",
                          first->uuid, then->uuid);
         } else {
             crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid,
                       then->uuid);
         }
     }
 
     if (type & pe_order_one_or_more) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_runnable, pe_order_one_or_more,
                 data_set);
 
         } else if (pcmk_is_set(first_flags, pe_action_runnable)) {
             /* alright. a "first" action is considered runnable, incremente
              * the 'runnable_before' counter */
             then->runnable_before++;
 
            /* If the runnable count for 'then' meets or exceeds the required
             * number of runnable "before" actions, mark 'then' runnable */
             if (then->runnable_before >= then->required_runnable_before) {
                 if (update_action_flags(then, pe_action_runnable, __func__, __LINE__)) {
                     pe__set_graph_flags(changed, first, pe_graph_updated_then);
                 }
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid,
                          then->uuid);
         } else {
             crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (then->rsc && pcmk_is_set(type, pe_order_probe)) {
         processed = TRUE;
 
         if (!pcmk_is_set(first_flags, pe_action_runnable)
             && (first->rsc->running_on != NULL)) {
 
             pe_rsc_trace(then->rsc, "Ignoring %s then %s - %s is about to be stopped",
                          first->uuid, then->uuid, first->rsc->id);
             type = pe_order_none;
             order->type = pe_order_none;
 
         } else {
             pe_rsc_trace(then->rsc, "Enforcing %s then %s", first->uuid, then->uuid);
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_runnable, pe_order_runnable_left,
                 data_set);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("runnable: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_runnable_left) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_runnable, pe_order_runnable_left,
                 data_set);
 
         } else if (!pcmk_is_set(first_flags, pe_action_runnable)) {
             pe_rsc_trace(then->rsc, "then unrunnable: %s then %s", first->uuid, then->uuid);
             if (update_action_flags(then, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
                 pe__set_graph_flags(changed, first, pe_graph_updated_then);
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("runnable: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_implies_first_migratable) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_optional,
                 pe_order_implies_first_migratable, data_set);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_pseudo_left) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_optional, pe_order_pseudo_left,
                 data_set);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_optional) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_runnable, pe_order_optional, data_set);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_asymmetrical) {
         processed = TRUE;
         if (then->rsc) {
             changed |= then->rsc->cmds->update_actions(first, then, node,
                 first_flags, pe_action_runnable, pe_order_asymmetrical,
                 data_set);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid);
         }
 
     }
 
     if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed)
         && (first_flags & pe_action_optional) == 0) {
         processed = TRUE;
         crm_trace("%s implies %s printed", first->uuid, then->uuid);
         update_action_flags(then, pe_action_print_always, __func__, __LINE__);  /* don't care about changed */
     }
 
     if (pcmk_is_set(type, pe_order_implies_first_printed)
         && !pcmk_is_set(then_flags, pe_action_optional)) {
 
         processed = TRUE;
         crm_trace("%s implies %s printed", then->uuid, first->uuid);
         update_action_flags(first, pe_action_print_always, __func__, __LINE__); /* don't care about changed */
     }
 
     if ((type & pe_order_implies_then
          || type & pe_order_implies_first
          || type & pe_order_restart)
         && first->rsc
         && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)
         && !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
         && pcmk_is_set(first->rsc->flags, pe_rsc_block)
         && !pcmk_is_set(first->flags, pe_action_runnable)) {
 
         if (update_action_flags(then, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
             pe__set_graph_flags(changed, first, pe_graph_updated_then);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (processed == FALSE) {
         crm_trace("Constraint 0x%.6x not applicable", type);
     }
 
     return changed;
 }
 
 static void
 mark_start_blocked(pe_resource_t *rsc, pe_resource_t *reason,
                    pe_working_set_t *data_set)
 {
     GListPtr gIter = rsc->actions;
     char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (!pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
             continue;
         }
         if (pcmk_is_set(action->flags, pe_action_runnable)) {
             pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
                                       reason_text, pe_action_runnable, FALSE);
             update_colo_start_chain(action, data_set);
             update_action(action, data_set);
         }
     }
     free(reason_text);
 }
 
 void
 update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     pe_resource_t *rsc = NULL;
 
     if (!pcmk_is_set(action->flags, pe_action_runnable)
         && pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
 
         rsc = uber_parent(action->rsc);
         if (rsc->parent) {
             /* For bundles, uber_parent() returns the clone/master, not the
              * bundle, so the existence of rsc->parent implies this is a bundle.
              * In this case, we need the bundle resource, so that we can check
              * if all containers are stopped/stopping.
              */
             rsc = rsc->parent;
         }
     }
 
     if (rsc == NULL || rsc->rsc_cons_lhs == NULL) {
         return;
     }
 
    /* If rsc has children, all of them must have their start actions marked
     * unrunnable before we follow the colocation chain for the parent. */
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *)gIter->data;
         pe_action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL);
         if ((start == NULL) || pcmk_is_set(start->flags, pe_action_runnable)) {
             return;
         }
     }
 
     for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *colocate_with = (pcmk__colocation_t *) gIter->data;
 
         if (colocate_with->score == INFINITY) {
             mark_start_blocked(colocate_with->rsc_lh, action->rsc, data_set);
         }
     }
 }
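 
 /* Example: given an INFINITY colocation such as this hypothetical CIB
  * snippet (resource and constraint names are illustrative):
  *
  *     <rsc_colocation id="colo-B-with-A" rsc="rscB" with-rsc="rscA"
  *                     score="INFINITY"/>
  *
  * an unrunnable start of rscA leads update_colo_start_chain() to call
  * mark_start_blocked() for rscB, making rscB's start unrunnable as well.
  */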
 
 gboolean
 update_action(pe_action_t *then, pe_working_set_t *data_set)
 {
     GListPtr lpc = NULL;
     enum pe_graph_flags changed = pe_graph_none;
     int last_flags = then->flags;
 
     crm_trace("Processing %s (%s %s %s)",
               then->uuid,
               pcmk_is_set(then->flags, pe_action_optional)? "optional" : "required",
               pcmk_is_set(then->flags, pe_action_runnable)? "runnable" : "unrunnable",
               pcmk_is_set(then->flags, pe_action_pseudo)? "pseudo"
                 : (then->node? then->node->details->uname : ""));
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         /* Initialize the count of known runnable before actions to 0. From
          * here, graph_update_action() is called for each of 'then's before
          * actions, and this count is incremented as runnable 'first' actions
          * are encountered. */
         then->runnable_before = 0;
 
         /* For backward compatibility with previous options that use the
          * 'requires_any' flag, initialize required_runnable_before to 1 if
          * it is not set. */
         if (then->required_runnable_before == 0) {
             then->required_runnable_before = 1;
         }
         pe__clear_action_flags(then, pe_action_runnable);
         /* We are relying on the pe_order_one_or_more clause of
          * graph_update_action(), called as part of the:
          *
          *    'if (first == other->action)'
          *
          * block below, to set this back if appropriate
          */
     }
 
     for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
         pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
         pe_action_t *first = other->action;
 
         pe_node_t *then_node = then->node;
         pe_node_t *first_node = first->node;
 
         enum pe_action_flags then_flags = 0;
         enum pe_action_flags first_flags = 0;
 
         if (first->rsc && first->rsc->variant == pe_group && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
             first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
             if (first_node) {
                 crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid);
             }
         }
 
         if (then->rsc && then->rsc->variant == pe_group && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
             then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
             if (then_node) {
                 crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid);
             }
         }
         /* Disable the constraint if it applies only when both actions are on
          * the same node, but they are not */
         if (pcmk_is_set(other->type, pe_order_same_node)
             && (first_node != NULL) && (then_node != NULL)
             && (first_node->details != then_node->details)) {
 
             crm_trace("Disabled constraint %s on %s -> %s on %s",
                        other->action->uuid, first_node->details->uname,
                        then->uuid, then_node->details->uname);
             other->type = pe_order_none;
             continue;
         }
 
         pe__clear_graph_flags(changed, then, pe_graph_updated_first);
 
         if (first->rsc && pcmk_is_set(other->type, pe_order_then_cancels_first)
             && !pcmk_is_set(then->flags, pe_action_optional)) {
 
             /* 'then' is required, so we must abandon 'first'
              * (e.g. a required stop cancels any reload).
              */
             pe__set_action_flags(other->action, pe_action_optional);
             if (!strcmp(first->task, CRMD_ACTION_RELOAD)) {
                 pe__clear_resource_flags(first->rsc, pe_rsc_reload);
             }
         }
 
         if (first->rsc && then->rsc && (first->rsc != then->rsc)
             && (is_parent(then->rsc, first->rsc) == FALSE)) {
             first = rsc_expand_action(first);
         }
         if (first != other->action) {
             crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid,
                       other->action->uuid);
         }
 
         first_flags = get_action_flags(first, then_node);
         then_flags = get_action_flags(then, first_node);
 
         crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x",
                   then->uuid,
                   pcmk_is_set(then_flags, pe_action_optional)? "optional" : "required",
                   pcmk_is_set(then_flags, pe_action_runnable)? "runnable" : "unrunnable",
                   pcmk_is_set(then_flags, pe_action_pseudo)? "pseudo"
                     : (then->node? then->node->details->uname : ""),
                   first->uuid,
                   pcmk_is_set(first_flags, pe_action_optional)? "optional" : "required",
                   pcmk_is_set(first_flags, pe_action_runnable)? "runnable" : "unrunnable",
                   pcmk_is_set(first_flags, pe_action_pseudo)? "pseudo"
                     : (first->node? first->node->details->uname : ""),
                   first_flags, other->type);
 
         if (first == other->action) {
             /*
              * 'first' was not expanded (e.g. from 'start' to 'running'), which could mean it:
              * - has no associated resource,
              * - was a primitive,
              * - was pre-expanded (e.g. 'running' instead of 'start')
              *
              * The third argument here to graph_update_action() is a node which is used under two conditions:
              * - Interleaving, in which case first->node and
              *   then->node are equal (and NULL)
              * - If 'then' is a clone, to limit the scope of the
              *   constraint to instances on the supplied node
              *
              */
             pe_node_t *node = then->node;
             changed |= graph_update_action(first, then, node, first_flags,
                                            then_flags, other, data_set);
 
             /* 'first' was for a complex resource (clone, group, etc),
              * create a new dependency if necessary
              */
         } else if (order_actions(first, then, other->type)) {
             /* This was the first time 'first' and 'then' were associated,
              * start again to get the new actions_before list
              */
             pe__set_graph_flags(changed, then,
                                 pe_graph_updated_then|pe_graph_disable);
         }
 
         if (changed & pe_graph_disable) {
             crm_trace("Disabled constraint %s -> %s in favor of %s -> %s",
                       other->action->uuid, then->uuid, first->uuid, then->uuid);
             pe__clear_graph_flags(changed, then, pe_graph_disable);
             other->type = pe_order_none;
         }
 
         if (changed & pe_graph_updated_first) {
             GListPtr lpc2 = NULL;
 
             crm_trace("Updated %s (first %s %s %s), processing dependents ",
                       first->uuid,
                       pcmk_is_set(first->flags, pe_action_optional)? "optional" : "required",
                       pcmk_is_set(first->flags, pe_action_runnable)? "runnable" : "unrunnable",
                       pcmk_is_set(first->flags, pe_action_pseudo)? "pseudo"
                         : (first->node? first->node->details->uname : ""));
             for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) {
                 pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
 
                 update_action(other->action, data_set);
             }
             update_action(first, data_set);
         }
     }
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         if (last_flags != then->flags) {
             pe__set_graph_flags(changed, then, pe_graph_updated_then);
         } else {
             pe__clear_graph_flags(changed, then, pe_graph_updated_then);
         }
     }
 
     if (changed & pe_graph_updated_then) {
         crm_trace("Updated %s (then %s %s %s), processing dependents ",
                   then->uuid,
                   pcmk_is_set(then->flags, pe_action_optional)? "optional" : "required",
                   pcmk_is_set(then->flags, pe_action_runnable)? "runnable" : "unrunnable",
                   pcmk_is_set(then->flags, pe_action_pseudo)? "pseudo"
                     : (then->node? then->node->details->uname : ""));
 
         if (pcmk_is_set(last_flags, pe_action_runnable)
             && !pcmk_is_set(then->flags, pe_action_runnable)) {
             update_colo_start_chain(then, data_set);
         }
         update_action(then, data_set);
         for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
             pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
 
             update_action(other->action, data_set);
         }
     }
 
     return FALSE;
 }
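 
 /* Sketch of how this propagation might be driven (a minimal example under
  * the assumption that a caller simply walks the working set's actions; the
  * actual call site may differ):
  *
  *     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
  *         update_action((pe_action_t *) iter->data, data_set);
  *     }
  *
  * Each call recurses into dependents whenever pe_graph_updated_first or
  * pe_graph_updated_then is set, so action flags settle to a fixed point.
  */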
 
 gboolean
 shutdown_constraints(pe_node_t * node, pe_action_t * shutdown_op, pe_working_set_t * data_set)
 {
     /* add the stop to the before lists so it counts as a pre-req
      * for the shutdown
      */
     GListPtr lpc = NULL;
 
     for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) {
         pe_action_t *action = (pe_action_t *) lpc->data;
 
         if (action->rsc == NULL || action->node == NULL) {
             continue;
         } else if (action->node->details != node->details) {
             continue;
         } else if (pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)) {
             pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid);
             continue;
         } else if (node->details->maintenance) {
             pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode",
                          action->uuid, node->details->uname);
             continue;
         } else if (!pcmk__str_eq(action->task, RSC_STOP, pcmk__str_casei)) {
             continue;
         } else if (!pcmk_any_flags_set(action->rsc->flags,
                                        pe_rsc_managed|pe_rsc_block)) {
             /*
              * If another action depends on this one, we may still end up blocking
              */
             pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid);
             continue;
         }
 
         pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid,
                      node->details->uname);
         pe__clear_action_flags(action, pe_action_optional);
         custom_action_order(action->rsc, NULL, action,
                             NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op,
                             pe_order_optional | pe_order_runnable_left, data_set);
     }
 
     return TRUE;
 }
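 
 /* Usage sketch (hedged; the real caller may differ): for each cluster node
  * scheduled to shut down, pass that node's CRM_OP_SHUTDOWN action:
  *
  *     if (node->details->shutdown) {
  *         shutdown_constraints(node, shutdown_op, data_set);
  *     }
  *
  * where shutdown_op is the shutdown action already created for this node.
  */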
 
 /*!
  * \internal
  * \brief Order all actions appropriately relative to a fencing operation
  *
  * Ensure start operations of affected resources are ordered after fencing,
  * imply stop and demote operations of affected resources by marking them as
  * pseudo-actions, etc.
  *
  * \param[in]     stonith_op  Fencing operation
  * \param[in,out] data_set    Working set of cluster
  */
 void
 pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
 {
     CRM_CHECK(stonith_op && data_set, return);
     for (GList *r = data_set->resources; r != NULL; r = r->next) {
         rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op, data_set);
     }
 }
 
 static pe_node_t *
 get_router_node(pe_action_t *action)
 {
     pe_node_t *began_on = NULL;
     pe_node_t *ended_on = NULL;
     pe_node_t *router_node = NULL;
     bool partial_migration = FALSE;
     const char *task = action->task;
 
     if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
         || !pe__is_guest_or_remote_node(action->node)) {
         return NULL;
     }
 
     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 
     began_on = pe__current_node(action->node->details->remote_rsc);
     ended_on = action->node->details->remote_rsc->allocated_to;
     if (action->node->details->remote_rsc
         && (action->node->details->remote_rsc->container == NULL)
         && action->node->details->remote_rsc->partial_migration_target) {
         partial_migration = TRUE;
     }
 
     /* if there is only one location to choose from,
      * this is easy. Check for those conditions first */
     if (!began_on || !ended_on) {
         /* remote rsc is either shutting down or starting up */
         return began_on ? began_on : ended_on;
     } else if (began_on->details == ended_on->details) {
         /* remote rsc didn't move nodes. */
         return began_on;
     }
 
     /* If we get here, we know the remote resource
      * began on one node and is moving to another node.
      *
      * This means some actions will get routed through the cluster
      * node the connection rsc began on, and others are routed through
      * the cluster node the connection rsc ends up on.
      *
      * 1. stop, demote, migrate actions of resources living in the remote
      *    node _MUST_ occur _BEFORE_ the connection can move (these actions
      *    are all required before the remote rsc stop action can occur.) In
      *    this case, we know these actions have to be routed through the initial
      *    cluster node the connection resource lived on before the move takes place.
      *    The exception is a partial migration of a (non-guest) remote
      *    connection resource; in that case, all actions (even these) will be
      *    ordered after the connection's pseudo-start on the migration target,
      *    so the target is the router node.
      *
      * 2. Everything else (start, promote, monitor, probe, refresh,
      *    clear failcount, delete, ...) must occur after the resource starts
      *    on the node it is moving to.
      */
 
     if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
         task = g_hash_table_lookup(action->meta, "notify_operation");
     }
 
     /* 1. before connection rsc moves. */
     if (pcmk__strcase_any_of(task, "stop", "demote", "migrate_from", "migrate_to",
                              NULL) && !partial_migration) {
         router_node = began_on;
 
     /* 2. after connection rsc moves. */
     } else {
         router_node = ended_on;
     }
     return router_node;
 }
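 
 /* Worked example (node names illustrative): if a remote connection began on
  * node1 and is moving to node2, a stop of a resource hosted on the remote
  * node is routed through node1, while its subsequent start is routed through
  * node2. With a partial migration, even the stop is routed through the
  * migration target.
  */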
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified ID
  *
  * \param[in]     id      Node UUID to add
  * \param[in,out] xml     Parent XML tag to add to
  */
 static xmlNode*
 add_node_to_xml_by_id(const char *id, xmlNode *xml)
 {
     xmlNode *node_xml;
 
     node_xml = create_xml_node(xml, XML_CIB_TAG_NODE);
     crm_xml_add(node_xml, XML_ATTR_UUID, id);
 
     return node_xml;
 }
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified node
  *
  * \param[in]     node  Node to add
  * \param[in,out] xml   XML to add node to
  */
 static void
 add_node_to_xml(const pe_node_t *node, void *xml)
 {
     add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that need an update of their maintenance state
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     data_set  Working set for cluster
  */
 static int
 add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     xmlNode *maintenance =
         xml? create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE) : NULL;
     int count = 0;
 
     for (gIter = data_set->nodes; gIter != NULL;
          gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         struct pe_node_shared_s *details = node->details;
 
         if (!pe__is_guest_or_remote_node(node)) {
             continue; /* only remote nodes need to know at the moment */
         }
 
         if (details->maintenance != details->remote_maintenance) {
             if (maintenance) {
                 crm_xml_add(
                     add_node_to_xml_by_id(node->details->id, maintenance),
                     XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
             }
             count++;
         }
     }
     crm_trace("%s %d nodes to adjust maintenance-mode "
               "to transition", maintenance?"Added":"Counted", count);
     return count;
 }
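 
 /* Sketch of the generated block (element and attribute spellings come from
  * XML_GRAPH_TAG_MAINTENANCE, XML_CIB_TAG_NODE, XML_ATTR_UUID and
  * XML_NODE_IS_MAINTENANCE; the literal names below are an approximation):
  *
  *     <maintenance>
  *       <node id="NODE-UUID" node_in_maintenance="1"/>
  *     </maintenance>
  *
  * with one entry per remote or guest node whose local maintenance flag
  * differs from what the remote end last recorded.
  */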
 
 /*!
  * \internal
  * \brief Add pseudo action with nodes needing maintenance state update
  *
  * \param[in,out] data_set  Working set for cluster
  */
 void
 add_maintenance_update(pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
 
     if (add_maintenance_nodes(NULL, data_set)) {
         crm_trace("adding maintenance state update pseudo action");
         action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
         pe__set_action_flags(action, pe_action_print_always);
     }
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that an action is expected to bring down
  *
  * If a specified action is expected to bring any nodes down, add an XML block
  * with their UUIDs. When a node is lost, this allows the controller to
  * determine whether it was expected.
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     action    Action to check for downed nodes
  * \param[in]     data_set  Working set for cluster
  */
 static void
 add_downed_nodes(xmlNode *xml, const pe_action_t *action,
                  const pe_working_set_t *data_set)
 {
     CRM_CHECK(xml && action && action->node && data_set, return);
 
     if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
 
         /* Shutdown makes the action's node down */
         xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
         add_node_to_xml_by_id(action->node->details->id, downed);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
 
         /* Fencing makes the action's node and any hosted guest nodes down */
         const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
 
         if (pcmk__strcase_any_of(fence, "off", "reboot", NULL)) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->node->details->id, downed);
             pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
         }
 
     } else if (action->rsc && action->rsc->is_remote_node
                && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
 
         /* Stopping a remote connection resource makes the connected node
          * down, unless the stop is part of a migration
          */
         GListPtr iter;
         pe_action_t *input;
         gboolean migrating = FALSE;
 
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             input = ((pe_action_wrapper_t *) iter->data)->action;
             if (input->rsc && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_casei)
                 && pcmk__str_eq(input->task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
                 migrating = TRUE;
                 break;
             }
         }
         if (!migrating) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->rsc->id, downed);
         }
     }
 }
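 
 /* Example of the block added for a fencing "off" or "reboot" action (tag
  * spelling per XML_GRAPH_TAG_DOWNED; the UUID is illustrative):
  *
  *     <downed>
  *       <node id="NODE-UUID"/>
  *     </downed>
  */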
 
 static bool
 should_lock_action(pe_action_t *action)
 {
     // Only actions taking place on resource's lock node are locked
     if ((action->rsc->lock_node == NULL) || (action->node == NULL)
         || (action->node->details != action->rsc->lock_node->details)) {
         return false;
     }
 
     /* During shutdown, only stops are locked (otherwise, another action such as
      * a demote would cause the controller to clear the lock)
      */
     if (action->node->details->shutdown && action->task
         && strcmp(action->task, RSC_STOP)) {
         return false;
     }
 
     return true;
 }
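 
 /* When this returns true, action2xml() below adds the lock time to the
  * operation, roughly (attribute spelling per XML_CONFIG_ATTR_SHUTDOWN_LOCK;
  * the epoch value is illustrative):
  *
  *     <rsc_op ... shutdown-lock="1590000000" .../>
  *
  * which lets the controller preserve the lock when the action completes.
  */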
 
 static xmlNode *
 action2xml(pe_action_t * action, gboolean as_input, pe_working_set_t *data_set)
 {
     gboolean needs_node_info = TRUE;
     gboolean needs_maintenance_info = FALSE;
     xmlNode *action_xml = NULL;
     xmlNode *args_xml = NULL;
 #if ENABLE_VERSIONED_ATTRS
     pe_rsc_action_details_t *rsc_details = NULL;
 #endif
 
     if (action == NULL) {
         return NULL;
     }
 
     if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
         /* All fences need node info; guest node fences are pseudo-events */
         action_xml = create_xml_node(NULL,
                                      pcmk_is_set(action->flags, pe_action_pseudo)?
                                      XML_GRAPH_TAG_PSEUDO_EVENT :
                                      XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_LRM_REFRESH, pcmk__str_casei)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
         // CIB-only clean-up for shutdown locks
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
         crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
 
 /* 	} else if(pcmk__str_eq(action->task, RSC_PROBED, pcmk__str_casei)) { */
 /* 		action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */
 
     } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
         if (pcmk__str_eq(action->task, CRM_OP_MAINTENANCE_NODES, pcmk__str_casei)) {
             needs_maintenance_info = TRUE;
         }
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT);
         needs_node_info = FALSE;
 
     } else {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
 
 #if ENABLE_VERSIONED_ATTRS
         rsc_details = pe_rsc_action_details(action);
 #endif
     }
 
     crm_xml_add_int(action_xml, XML_ATTR_ID, action->id);
     crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);
     if (action->rsc != NULL && action->rsc->clone_name != NULL) {
         char *clone_key = NULL;
         guint interval_ms;
 
         if (pcmk__guint_from_hash(action->meta,
                                   XML_LRM_ATTR_INTERVAL_MS, 0,
                                   &interval_ms) != pcmk_rc_ok) {
             interval_ms = 0;
         }
 
         if (pcmk__str_eq(action->task, RSC_NOTIFY, pcmk__str_casei)) {
             const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
             const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
 
             CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid));
             CRM_CHECK(n_task != NULL,
                       crm_err("No notify operation value found for %s", action->uuid));
             clone_key = pcmk__notify_key(action->rsc->clone_name,
                                          n_type, n_task);
 
         } else if(action->cancel_task) {
             clone_key = pcmk__op_key(action->rsc->clone_name,
                                      action->cancel_task, interval_ms);
         } else {
             clone_key = pcmk__op_key(action->rsc->clone_name,
                                      action->task, interval_ms);
         }
 
         CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid));
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
         crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
         free(clone_key);
 
     } else {
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
     }
 
     if (needs_node_info && action->node != NULL) {
         pe_node_t *router_node = get_router_node(action);
 
         crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
         crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
         if (router_node) {
             crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
         }
 
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET), strdup(action->node->details->uname));
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID), strdup(action->node->details->id));
     }
 
     /* No details if this action is only being listed in the inputs section */
     if (as_input) {
         return action_xml;
     }
 
     if (action->rsc && !pcmk_is_set(action->flags, pe_action_pseudo)) {
         int lpc = 0;
         xmlNode *rsc_xml = NULL;
         const char *attr_list[] = {
             XML_AGENT_ATTR_CLASS,
             XML_AGENT_ATTR_PROVIDER,
             XML_ATTR_TYPE
         };
 
         /* If a resource is locked to a node via shutdown-lock, mark its actions
          * so the controller can preserve the lock when the action completes.
          */
         if (should_lock_action(action)) {
             crm_xml_add_ll(action_xml, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
                            (long long) action->rsc->lock_time);
         }
 
         // List affected resource
 
         rsc_xml = create_xml_node(action_xml,
                                   crm_element_name(action->rsc->xml));
         if (pcmk_is_set(action->rsc->flags, pe_rsc_orphan)
             && action->rsc->clone_name) {
             /* Do not use the 'instance free' name here as that
              * might interfere with the instance we plan to keep.
              * I.e., if there are more than two named /anonymous/
              * instances on a given node, we need to make sure the
              * command goes to the right one.
              *
              * Keep this block, even when everyone is using
              * 'instance free' anonymous clone names - it means
              * we'll do the right thing if anyone toggles the
              * unique flag to 'off'
              */
             crm_debug("Using orphan clone name %s instead of %s", action->rsc->id,
                       action->rsc->clone_name);
             crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
             crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
 
         } else if (!pcmk_is_set(action->rsc->flags, pe_rsc_unique)) {
             const char *xml_id = ID(action->rsc->xml);
 
             crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id,
                       action->rsc->clone_name);
 
             /* ID is what we'd like client to use
              * ID_LONG is what they might know it as instead
              *
              * ID_LONG is only strictly needed /here/ during the
              * transition period until all nodes in the cluster
              * are running the new software /and/ have rebooted
              * once (meaning that they've only ever spoken to a DC
              * supporting this feature).
              *
              * If anyone toggles the unique flag to 'on', the
              * 'instance free' name will correspond to an orphan
              * and fall into the clause above instead
              */
             crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
             if (action->rsc->clone_name && !pcmk__str_eq(xml_id, action->rsc->clone_name, pcmk__str_casei)) {
                 crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
             } else {
                 crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
             }
 
         } else {
             CRM_ASSERT(action->rsc->clone_name == NULL);
             crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
         }
 
         for (lpc = 0; lpc < DIMOF(attr_list); lpc++) {
             crm_xml_add(rsc_xml, attr_list[lpc],
                         g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
         }
     }
 
     /* List any attributes in effect */
     args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
     crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
 
     g_hash_table_foreach(action->extra, hash2field, args_xml);
     if (action->rsc != NULL && action->node) {
-        GHashTable *p = crm_str_table_new();
+        // Get the resource instance attributes, evaluated properly for node
+        GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
 
-        get_rsc_attributes(p, action->rsc, action->node, data_set);
-        g_hash_table_foreach(p, hash2smartfield, args_xml);
-        g_hash_table_destroy(p);
+        /* REMOTE_CONTAINER_HACK: If this is a remote connection resource with
+         * addr="#uname", pull the actual value from the parameters evaluated
+         * without a node (which was put there earlier in stage8() when the
+         * bundle's expand() method was called).
+         */
+        const char *remote_addr = g_hash_table_lookup(params,
+                                                      XML_RSC_ATTR_REMOTE_RA_ADDR);
+
+        if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
+            GHashTable *base = pe_rsc_params(action->rsc, NULL, data_set);
+
+            remote_addr = g_hash_table_lookup(base,
+                                              XML_RSC_ATTR_REMOTE_RA_ADDR);
+            if (remote_addr != NULL) {
+                g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
+                                    strdup(remote_addr));
+            }
+        }
+
+        g_hash_table_foreach(params, hash2smartfield, args_xml);
 
 #if ENABLE_VERSIONED_ATTRS
         {
             xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
 
             pe_get_versioned_attributes(versioned_parameters, action->rsc,
                                         action->node, data_set);
             if (xml_has_children(versioned_parameters)) {
                 add_node_copy(action_xml, versioned_parameters);
             }
             free_xml(versioned_parameters);
         }
 #endif
 
     } else if(action->rsc && action->rsc->variant <= pe_native) {
-        g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml);
+        GHashTable *params = pe_rsc_params(action->rsc, NULL, data_set);
+
+        g_hash_table_foreach(params, hash2smartfield, args_xml);
 
 #if ENABLE_VERSIONED_ATTRS
         if (xml_has_children(action->rsc->versioned_parameters)) {
             add_node_copy(action_xml, action->rsc->versioned_parameters);
         }
 #endif
     }
 
 #if ENABLE_VERSIONED_ATTRS
     if (rsc_details) {
         if (xml_has_children(rsc_details->versioned_parameters)) {
             add_node_copy(action_xml, rsc_details->versioned_parameters);
         }
 
         if (xml_has_children(rsc_details->versioned_meta)) {
             add_node_copy(action_xml, rsc_details->versioned_meta);
         }
     }
 #endif
 
     g_hash_table_foreach(action->meta, hash2metafield, args_xml);
     if (action->rsc != NULL) {
         const char *value = g_hash_table_lookup(action->rsc->meta, "external-ip");
         pe_resource_t *parent = action->rsc;
 
         while (parent != NULL) {
             parent->cmds->append_meta(parent, args_xml);
             parent = parent->parent;
         }
 
         if(value) {
             hash2smartfield((gpointer)"pcmk_external_ip", (gpointer)value, (gpointer)args_xml);
         }
 
         if (action->node && /* make clang analyzer happy */
             pe__is_guest_node(action->node)) {
             pe_node_t *host = NULL;
             enum action_tasks task = text2task(action->task);
 
             if(task == action_notify || task == action_notified) {
                 const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
                 task = text2task(n_task);
             }
 
             // Differentiate between up and down actions
             switch (task) {
                 case stop_rsc:
                 case stopped_rsc:
                 case action_demote:
                 case action_demoted:
                     host = pe__current_node(action->node->details->remote_rsc->container);
                     break;
                 case start_rsc:
                 case started_rsc:
                 case monitor_rsc:
                 case action_promote:
                 case action_promoted:
                     host = action->node->details->remote_rsc->container->allocated_to;
                     break;
                 default:
                     break;
             }
 
             if(host) {
                 hash2metafield((gpointer)XML_RSC_ATTR_TARGET,
                                (gpointer)g_hash_table_lookup(action->rsc->meta, XML_RSC_ATTR_TARGET), (gpointer)args_xml);
                 hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                                (gpointer)host->details->uname,
                                (gpointer)args_xml);
             }
         }
 
     } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei) && action->node) {
         /* Pass the node's attributes as meta-attributes.
          *
          * @TODO: Determine whether it is still necessary to do this. It was
          * added in 33d99707, probably for the libfence-based implementation in
          * c9a90bd, which is no longer used.
          */
         g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
     }
 
     sorted_xml(args_xml, action_xml, FALSE);
     free_xml(args_xml);
 
     /* List any nodes this action is expected to make down */
     if (needs_node_info && (action->node != NULL)) {
         add_downed_nodes(action_xml, action, data_set);
     }
 
     if (needs_maintenance_info) {
         add_maintenance_nodes(action_xml, data_set);
     }
 
     crm_log_xml_trace(action_xml, "dumped action");
     return action_xml;
 }
 
 static bool
 should_dump_action(pe_action_t *action)
 {
     CRM_CHECK(action != NULL, return false);
 
     if (pcmk_is_set(action->flags, pe_action_dumped)) {
         crm_trace("Action %s (%d) already dumped", action->uuid, action->id);
         return false;
 
     } else if (pcmk_is_set(action->flags, pe_action_pseudo)
                && pcmk__str_eq(action->task, CRM_OP_PROBED, pcmk__str_casei)) {
         GListPtr lpc = NULL;
 
         /* This is a horrible but convenient hack
          *
          * It minimizes the number of actions with unsatisfied inputs
          * (i.e. not included in the graph)
          *
          * This, in turn, means we can be more concise when printing
          * aborted/incomplete graphs.
          *
          * It also makes it obvious which node is preventing
          * probe_complete from running (presumably because it is only
          * partially up)
          *
          * For these reasons we tolerate such perversions
          */
 
         for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) {
             pe_action_wrapper_t *wrapper = (pe_action_wrapper_t *) lpc->data;
 
             if (!pcmk_is_set(wrapper->action->flags, pe_action_runnable)) {
                 /* Only interested in runnable operations */
             } else if (!pcmk__str_eq(wrapper->action->task, RSC_START, pcmk__str_casei)) {
                 /* Only interested in start operations */
             } else if (pcmk_is_set(wrapper->action->flags, pe_action_dumped)
                        || should_dump_action(wrapper->action)) {
                 crm_trace("Action %s (%d) should be dumped: "
                           "dependency of %s (%d)",
                           action->uuid, action->id,
                           wrapper->action->uuid, wrapper->action->id);
                 return true;
             }
         }
     }
 
     if (!pcmk_is_set(action->flags, pe_action_runnable)) {
         crm_trace("Ignoring action %s (%d): unrunnable",
                   action->uuid, action->id);
         return false;
 
     } else if (pcmk_is_set(action->flags, pe_action_optional)
                && !pcmk_is_set(action->flags, pe_action_print_always)) {
         crm_trace("Ignoring action %s (%d): optional",
                   action->uuid, action->id);
         return false;
 
     // Monitors should be dumped even for unmanaged resources
     } else if (action->rsc && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
                && !pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei)) {
 
         const char *interval_ms_s = g_hash_table_lookup(action->meta,
                                                         XML_LRM_ATTR_INTERVAL_MS);
 
         // Cancellation of recurring monitors should still be dumped
         if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
             crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
                       action->uuid, action->id, action->rsc->id);
             return false;
         }
     }
 
     if (pcmk_is_set(action->flags, pe_action_pseudo) ||
         pcmk__strcase_any_of(action->task, CRM_OP_FENCE, CRM_OP_SHUTDOWN, NULL)) {
         /* skip the next checks */
         return true;
     }
 
     if (action->node == NULL) {
         pe_err("Skipping action %s (%d) "
                "because it was not allocated to a node (bug?)",
                action->uuid, action->id);
         log_action(LOG_DEBUG, "Unallocated action", action, false);
         return false;
 
     } else if (pcmk_is_set(action->flags, pe_action_dc)) {
         crm_trace("Action %s (%d) should be dumped: "
                   "can run on DC instead of %s",
                   action->uuid, action->id, action->node->details->uname);
 
     } else if (pe__is_guest_node(action->node)
                && !action->node->details->remote_requires_reset) {
         crm_trace("Action %s (%d) should be dumped: "
                   "assuming will be runnable on guest node %s",
                   action->uuid, action->id, action->node->details->uname);
 
     } else if (action->node->details->online == false) {
         pe_err("Skipping action %s (%d) "
                "because it was scheduled for offline node (bug?)",
                action->uuid, action->id);
         log_action(LOG_DEBUG, "Action for offline node", action, FALSE);
         return false;
 #if 0
         /* but this would also affect resources that can be safely
          *  migrated before a fencing op
          */
     } else if (action->node->details->unclean == false) {
         pe_err("Skipping action %s (%d) "
                "because it was scheduled for unclean node (bug?)",
                action->uuid, action->id);
         log_action(LOG_DEBUG, "Action for unclean node", action, false);
         return false;
 #endif
     }
     return true;
 }
 
 /* lowest to highest */
 static gint
 sort_action_id(gconstpointer a, gconstpointer b)
 {
     const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
     const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (action_wrapper1->action->id > action_wrapper2->action->id) {
         return -1;
     }
 
     if (action_wrapper1->action->id < action_wrapper2->action->id) {
         return 1;
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Check whether an action input should be in the transition graph
  *
  * \param[in]     action  Action to check
  * \param[in,out] input   Action input to check
  *
  * \return true if input should be in graph, false otherwise
  * \note This function may not only check an input, but also disable it
  *       under certain circumstances (load or anti-colocation orderings that
  *       are not needed).
  */
 static bool
 check_dump_input(pe_action_t *action, pe_action_wrapper_t *input)
 {
     int type = input->type;
 
     if (input->state == pe_link_dumped) {
         return true;
     }
 
     pe__clear_order_flags(type, pe_order_implies_first_printed
                                 |pe_order_implies_then_printed
                                 |pe_order_optional);
 
     if (input->type == pe_order_none) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "ordering disabled",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
                && (type == pe_order_none)
                && !pcmk__str_eq(input->action->uuid, CRM_OP_PROBED, pcmk__str_casei)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional and input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
                && pcmk_is_set(input->type, pe_order_one_or_more)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "one-or-more and input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(action->flags, pe_action_pseudo)
                && pcmk_is_set(input->type, pe_order_stonith_stop)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "stonith stop but action is pseudo",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->type, pe_order_implies_first_migratable)
                && !pcmk_is_set(input->action->flags, pe_action_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "implies input migratable but input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->type, pe_order_apply_first_non_migratable)
                && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "only if input unmigratable but input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if ((input->type == pe_order_optional)
                && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)
                && pcmk__ends_with(input->action->uuid, "_stop_0")) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional but stop in migration",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (input->type == pe_order_load) {
         pe_node_t *input_node = input->action->node;
 
         // load orderings are relevant only if actions are for same node
 
         if (action->rsc && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)) {
             pe_node_t *allocated = action->rsc->allocated_to;
 
             /* For load_stopped -> migrate_to orderings, we care about where it
              * has been allocated to, not where it will be executed.
              */
             if ((input_node == NULL) || (allocated == NULL)
                 || (input_node->details != allocated->details)) {
                 crm_trace("Ignoring %s (%d) input %s (%d): "
                           "load ordering node mismatch %s vs %s",
                           action->uuid, action->id,
                           input->action->uuid, input->action->id,
                           (allocated? allocated->details->uname : "<none>"),
                           (input_node? input_node->details->uname : "<none>"));
                 input->type = pe_order_none;
                 return false;
             }
 
         } else if ((input_node == NULL) || (action->node == NULL)
                    || (input_node->details != action->node->details)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "load ordering node mismatch %s vs %s",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       (action->node? action->node->details->uname : "<none>"),
                       (input_node? input_node->details->uname : "<none>"));
             input->type = pe_order_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "load ordering input optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->type = pe_order_none;
             return false;
         }
 
     } else if (input->type == pe_order_anti_colocation) {
         if (input->action->node && action->node
             && (input->action->node->details != action->node->details)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "anti-colocation node mismatch %s vs %s",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       action->node->details->uname,
                       input->action->node->details->uname);
             input->type = pe_order_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "anti-colocation input optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->type = pe_order_none;
             return false;
         }
 
     } else if (input->action->rsc
                && input->action->rsc != action->rsc
                && pcmk_is_set(input->action->rsc->flags, pe_rsc_failed)
                && !pcmk_is_set(input->action->rsc->flags, pe_rsc_managed)
                && pcmk__ends_with(input->action->uuid, "_stop_0")
                && action->rsc && pe_rsc_is_clone(action->rsc)) {
         crm_warn("Ignoring requirement that %s complete before %s:"
                  " unmanaged failed resources cannot prevent clone shutdown",
                  input->action->uuid, action->uuid);
         return false;
 
     } else if (pcmk_is_set(input->action->flags, pe_action_optional)
                && !pcmk_any_flags_set(input->action->flags,
                                       pe_action_print_always|pe_action_dumped)
                && !should_dump_action(input->action)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "input optional",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
     }
 
     crm_trace("%s (%d) input %s (%d) @ %s should be dumped: %s, %s, %s, 0x%.6x",
               action->uuid, action->id,
               input->action->uuid, input->action->id,
               input->action->node? input->action->node->details->uname : "no node",
               pcmk_is_set(input->action->flags, pe_action_pseudo)? "pseudo" : "real",
               pcmk_is_set(input->action->flags, pe_action_runnable)? "runnable" : "unrunnable",
               pcmk_is_set(input->action->flags, pe_action_optional)? "optional" : "required",
               input->type);
     return true;
 }
 
 static bool
 graph_has_loop(pe_action_t *init_action, pe_action_t *action,
                pe_action_wrapper_t *input)
 {
     bool has_loop = false;
 
     if (pcmk_is_set(input->action->flags, pe_action_tracking)) {
         crm_trace("Breaking tracking loop: %s@%s -> %s@%s (0x%.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->details->uname : "",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   input->type);
         return false;
     }
 
     // Don't need to check inputs that won't be used
     if (!check_dump_input(action, input)) {
         return false;
     }
 
     if (input->action == init_action) {
         crm_debug("Input loop found in %s@%s ->...-> %s@%s",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   init_action->uuid,
                   init_action->node? init_action->node->details->uname : "");
         return true;
     }
 
     pe__set_action_flags(input->action, pe_action_tracking);
 
     crm_trace("Checking inputs of action %s@%s input %s@%s (0x%.6x)"
               "for graph loop with %s@%s ",
               action->uuid,
               action->node? action->node->details->uname : "",
               input->action->uuid,
               input->action->node? input->action->node->details->uname : "",
               input->type,
               init_action->uuid,
               init_action->node? init_action->node->details->uname : "");
 
     // Recursively check input itself for loops
     for (GList *iter = input->action->actions_before;
          iter != NULL; iter = iter->next) {
 
         if (graph_has_loop(init_action, input->action,
                            (pe_action_wrapper_t *) iter->data)) {
             // Recursive call already logged a debug message
             has_loop = true;
             goto done;
         }
     }
 
 done:
     pe__clear_action_flags(input->action, pe_action_tracking);
 
     if (!has_loop) {
         crm_trace("No input loop found in %s@%s -> %s@%s (0x%.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->details->uname : "",
                   action->uuid,
                   action->node? action->node->details->uname : "",
                   input->type);
     }
     return has_loop;
 }
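 
 /* Tiny example: with mutual orderings A -> B and B -> A, a call like
  * graph_has_loop(A, A, input_from_B) recurses through B, reaches A again,
  * and reports the loop; the pe_action_tracking flag prevents re-entering
  * actions already on the current search path.
  */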
 
 bool
 pcmk__ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
 {
     /* Prevent user-defined ordering constraints between resources
      * running in a guest node and the resource that defines that node.
      */
     if (!pcmk_is_set(input->type, pe_order_preserve)
         && action->rsc && action->rsc->fillers
         && input->action->rsc && input->action->node
         && input->action->node->details->remote_rsc
         && (input->action->node->details->remote_rsc->container == action->rsc)) {
         crm_warn("Invalid ordering constraint between %s and %s",
                  input->action->rsc->id, action->rsc->id);
         return true;
     }
 
     /* If there's an order like
      * "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1"
      *
      * then rscA is being migrated from node1 to node2, while rscB is being
      * migrated from node2 to node1. If there would be a graph loop,
      * break the order "load_stopped_node2" -> "rscA_migrate_to node1".
      */
     if ((input->type == pe_order_load) && action->rsc
         && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)
         && graph_has_loop(action, action, input)) {
         return true;
     }
 
     return false;
 }
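 
 /* Example of the first case (names illustrative): if guest1-docker is the
  * container resource defining guest node guest1, a user-defined constraint
  * such as
  *
  *     <rsc_order id="bad-order" first="rsc-inside-guest1" then="guest1-docker"/>
  *
  * is flagged as invalid here.
  */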
 
 // Remove duplicate inputs (regardless of flags)
 static void
 deduplicate_inputs(pe_action_t *action)
 {
     GList *item = NULL;
     GList *next = NULL;
     pe_action_wrapper_t *last_input = NULL;
 
     action->actions_before = g_list_sort(action->actions_before,
                                          sort_action_id);
     for (item = action->actions_before; item != NULL; item = next) {
         pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
 
         next = item->next;
         if (last_input && (input->action->id == last_input->action->id)) {
             crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
                       input->action->uuid, input->action->id,
                       action->uuid, action->id);
 
             /* For the purposes of scheduling, the ordering flags no longer
              * matter, but crm_simulate looks at certain ones when creating a
              * dot graph. Combining the flags is sufficient for that purpose.
              */
             last_input->type |= input->type;
             if (input->state == pe_link_dumped) {
                 last_input->state = pe_link_dumped;
             }
 
             free(item->data);
             action->actions_before = g_list_delete_link(action->actions_before,
                                                         item);
         } else {
             last_input = input;
             input->state = pe_link_not_dumped;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Add an action to the transition graph XML if appropriate
  *
  * \param[in] action    Action to possibly add
  * \param[in] data_set  Cluster working set
  *
  * \note This will de-duplicate the action inputs, meaning that the
  *       pe_action_wrapper_t::type flags can no longer be relied on to retain
  *       their original settings. That means this MUST be called after stage7()
  *       is complete, and nothing after this should rely on those type flags.
  *       (For example, some code looks for type equal to some flag rather than
  *       whether the flag is set, and some code looks for particular
  *       combinations of flags -- such code must be done before stage8().)
  */
 void
 graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
 {
     GList *lpc = NULL;
     int synapse_priority = 0;
     xmlNode *syn = NULL;
     xmlNode *set = NULL;
     xmlNode *in = NULL;
     xmlNode *xml_action = NULL;
     pe_action_wrapper_t *input = NULL;
 
     /* If we haven't already, de-duplicate inputs -- even if we won't be dumping
      * the action, so that crm_simulate dot graphs don't have duplicates.
      */
     if (!pcmk_is_set(action->flags, pe_action_dedup)) {
         deduplicate_inputs(action);
         pe__set_action_flags(action, pe_action_dedup);
     }
 
     if (should_dump_action(action) == FALSE) {
         return;
     }
 
     pe__set_action_flags(action, pe_action_dumped);
 
     syn = create_xml_node(data_set->graph, "synapse");
     set = create_xml_node(syn, "action_set");
     in = create_xml_node(syn, "inputs");
 
     crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
     data_set->num_synapse++;
 
     if (action->rsc != NULL) {
         synapse_priority = action->rsc->priority;
     }
     if (action->priority > synapse_priority) {
         synapse_priority = action->priority;
     }
     if (synapse_priority > 0) {
         crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority);
     }
 
     xml_action = action2xml(action, FALSE, data_set);
     add_node_nocopy(set, crm_element_name(xml_action), xml_action);
 
     for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
         input = (pe_action_wrapper_t *) lpc->data;
         if (check_dump_input(action, input)) {
             xmlNode *input_xml = create_xml_node(in, "trigger");
 
             input->state = pe_link_dumped;
             xml_action = action2xml(input->action, TRUE, data_set);
             add_node_nocopy(input_xml, crm_element_name(xml_action), xml_action);
         }
     }
 }
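 
 /* Shape of the XML this emits, following the element names used above
  * (ids, priority, and contents are illustrative):
  *
  *     <synapse id="0" priority="1000000">
  *       <action_set>
  *         <rsc_op id="3" .../>
  *       </action_set>
  *       <inputs>
  *         <trigger><rsc_op id="1" .../></trigger>
  *       </inputs>
  *     </synapse>
  */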
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 7b326e967c..615a35ac35 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,2098 +1,2102 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include "./variant.h"
 
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
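 
 /* For illustration only (not part of the original source): next_ip() hands
  * out addresses one at a time, carrying the last octet over into the third
  * and giving up once the third octet passes 253. A minimal sketch of a
  * caller, assuming a hypothetical range start of "192.168.122.1":
  */
 #if 0
 static void
 next_ip_example(void)
 {
     char *ip = strdup("192.168.122.1");
 
     while (ip != NULL) {
         char *next = next_ip(ip); // "192.168.122.2", ".3", ... then NULL
 
         free(ip);
         ip = next;
     }
 }
 #endif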
 
 static int
 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
             char *buffer, int max)
 {
     if(data->ip_range_start == NULL) {
         return 0;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 return snprintf(buffer, max, " --add-host=%s-%d:%s",
                                 data->prefix, replica->offset,
                                 replica->ipaddr);
             } else {
                 // Avoid falling through to the rkt-only --hosts-entry option
                 return 0;
             }
         case PE__CONTAINER_AGENT_RKT:
             return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                             replica->ipaddr, data->prefix, replica->offset);
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return 0;
 }
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
         }
         return TRUE;
     }
     return FALSE;
 }
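 
 /* For illustration only (not part of the original source): the two network
  * configurations that valid_network() accepts correspond roughly to these
  * CIB snippets (attribute names taken from pe__unpack_bundle() below):
  *
  *   <network ip-range-start="192.168.122.131" host-netmask="24"/>
  *
  * or, with no IP range (which forces replicas-per-host back to 1):
  *
  *   <network control-port="3121"/>
  */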
 
 static bool
 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                    pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
             return FALSE;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return TRUE;
 }
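 
 /* For illustration only (not part of the original source): for a bundle
  * prefixed "httpd-bundle" whose first replica was allocated 192.168.122.131,
  * create_ip_resource() generates roughly this primitive:
  *
  *   <primitive id="httpd-bundle-ip-192.168.122.131" class="ocf"
  *              provider="heartbeat" type="IPaddr2">
  *     <instance_attributes id="httpd-bundle-attributes-0">
  *       <nvpair name="ip" value="192.168.122.131"/>
  *       <nvpair name="cidr_netmask" value="32"/>
  *     </instance_attributes>
  *     <operations>
  *       <op name="monitor" interval="60s"/>
  *     </operations>
  *   </primitive>
  */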
 
 static bool
 create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_DOCKER_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this helps applications that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(!pcmk__str_eq(data->container_network, "host", pcmk__str_casei)) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive; we'll
              * monitor the child independently.
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
         } else if ((replica->child != NULL) && data->untrusted) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   CRM_DAEMON_DIR "/pacemaker-execd");
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                                   CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
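 
 /* For illustration only (not part of the original source): with an IP range,
  * one port mapping, and no user options, the run_opts accumulated above come
  * out roughly as:
  *
  *   --restart=no -h httpd-bundle-0 -e PCMK_stderr=1 -e PCMK_remote_port=3121
  *   -p 192.168.122.131:80:80
  */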
 
 static bool
 create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_PODMAN_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         // FIXME: (bandini 2018-08) podman has no restart policies
         //offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this helps applications that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             // podman has no support for --link-local-ip
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(!pcmk__str_eq(data->container_network, "host", pcmk__str_casei)) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive; we'll
              * monitor the child independently.
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
         } else if ((replica->child != NULL) && data->untrusted) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   CRM_DAEMON_DIR "/pacemaker-execd");
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                                   CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent,
                            data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
 static bool
 create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                     pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         int volid = 0;
 
         id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_RKT_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this helps applications that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
             }
             volid++;
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset,
                                    " --port=%s:%s:%s", port->target,
                                    replica->ipaddr, port->source);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive; we'll
              * monitor the child independently.
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
         } else if ((replica->child != NULL) && data->untrusted) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   CRM_DAEMON_DIR "/pacemaker-execd");
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                                   CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
 /*!
  * \internal
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pe_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         GListPtr child;
 
         for (child = rsc->children; child != NULL; child = child->next) {
             disallow_node((pe_resource_t *) (child->data), uname);
         }
     }
 }
 
 static bool
 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         GListPtr rsc_iter = NULL;
         pe_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(data_set->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             //@TODO return false instead of asserting?
             CRM_ASSERT(pe_find_resource(data_set->resources, id) == NULL);
         }
 
         /* REMOTE_CONTAINER_HACK: When the connection does not have its own
          * IP, we use the magic string "#uname" as the server name, which
          * lets us support nested remotes (i.e. a bundle running on a remote
          * node).
          */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = crm_itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
          * knows that the bundle node is fenced by recovering the container, and
          * that remote should be ordered relative to the container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(data_set->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   data_set);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when common_unpack() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
             disallow_node((pe_resource_t *) (rsc_iter->data), uname);
         }
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
                                                               g_str_equal,
                                                               NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pe_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
             return FALSE;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return TRUE;
 }
 
 static bool
 create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
 
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
             if (!create_docker_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_PODMAN:
             if (!create_podman_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             if (!create_rkt_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             return FALSE;
     }
 
     if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if (replica->child && replica->ipaddr) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
     }
 
     return TRUE;
 }
 
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     mount->source = strdup(source);
     mount->target = strdup(target);
     if (options) {
         mount->options = strdup(options);
     }
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
 static pe__bundle_replica_t *
 replica_for_remote(pe_resource_t *remote)
 {
     pe_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
 bool
-pe__bundle_needs_remote_name(pe_resource_t *rsc)
+pe__bundle_needs_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value;
+    GHashTable *params = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
-    value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
+    // Use NULL node since pcmk__bundle_expand() uses that to set value
+    params = pe_rsc_params(rsc, NULL, data_set);
+    value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     return pcmk__str_eq(value, "#uname", pcmk__str_casei)
            && xml_contains_remote_node(rsc->xml);
 }
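 
 /* For illustration only (not part of the original source): with the data set
  * now passed in, a caller that fills in the connection address looks roughly
  * like:
  *
  *     if (pe__bundle_needs_remote_name(rsc, data_set)) {
  *         pe__add_bundle_remote_name(rsc, data_set, xml,
  *                                    XML_RSC_ATTR_REMOTE_RA_ADDR);
  *     }
  *
  * (pe__add_bundle_remote_name() itself performs the same check first, as
  * seen below.)
  */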
 
 const char *
-pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
+pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
+                           xmlNode *xml, const char *field)
 {
     // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with
     // Pacemaker Remote inside
 
     pe_node_t *node = NULL;
     pe__bundle_replica_t *replica = NULL;
 
-    if (!pe__bundle_needs_remote_name(rsc)) {
+    if (!pe__bundle_needs_remote_name(rsc, data_set)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, node->details->uname);
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
 #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do {     \
         flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,           \
                                    "Bundle mount", ID(mount_xml), flags,    \
                                    (flags_to_set), #flags_to_set);          \
     } while (0)
 
 gboolean
 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
 
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     bundle_data->promoted_max = crm_parse_int(value, "0");
     if (bundle_data->promoted_max < 0) {
         pe_err("%s for %s must be nonnegative integer, using 0",
                XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
         bundle_data->promoted_max = 0;
     }
 
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && bundle_data->promoted_max) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         bundle_data->nreplicas = crm_parse_int(value, "1");
     }
     if (bundle_data->nreplicas < 1) {
         pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
         bundle_data->nreplicas = 1;
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if the Docker daemon is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
     if (bundle_data->nreplicas_per_host < 1) {
         pe_err("'replicas-per-host' for %s must be positive integer, using 1",
                rsc->id);
         bundle_data->nreplicas_per_host = 1;
     }
     if (bundle_data->nreplicas_per_host == 1) {
         pe__clear_resource_flags(rsc, pe_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
         for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
              xml_child = pcmk__xe_next(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
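 
     /* For illustration only (not part of the original source): the loop above
      * turns <network> children such as
      *
      *   <port-mapping id="httpd-port" port="80"/>
      *
      * into pe__bundle_port_t entries, defaulting the internal port to the
      * external one when "internal-port" is absent.
      */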
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
          xml_child = pcmk__xe_next(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             pe__set_bundle_mount_flags(xml_child, flags,
                                        pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
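 
     /* For illustration only (not part of the original source): the loop above
      * maps <storage> children such as
      *
      *   <storage-mapping id="httpd-logs" target-dir="/var/log"
      *                    source-dir-root="/var/log/pacemaker/bundles"/>
      *
      * into pe__bundle_mount_t entries, with source-dir-root marking the mount
      * as a per-replica subdirectory.
      */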
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = crm_itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = crm_itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                               pcmk__btoa(bundle_data->nreplicas_per_host > 1));
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = crm_itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GListPtr childIter = NULL;
         pe_resource_t *new_rsc = NULL;
         pe__bundle_port_t *port = NULL;
 
         int offset = 0, max = 1024;
         char *buffer = NULL;
 
         if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", ID(rsc->xml));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
             return FALSE;
         }
 
         bundle_data->child = new_rsc;
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection will do extra checking to make sure the key
          *   file exists and is readable, which we can't do here on the DC;
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = crm_itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = calloc(1, max+1);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
                 pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
             }
 
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
             bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                 XML_RSC_ATTR_TARGET);
         }
         bundle_data->container_host_options = buffer;
         if (bundle_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
                                  strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         int offset = 0, max = 1024;
         char *buffer = calloc(1, max+1);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->offset = lpc;
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = buffer;
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (!create_container(rsc, bundle_data, replica, data_set)) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
 
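 /* For illustration only (not part of the original source):
  * replica_resource_active() is a tri-state helper. It returns TRUE or FALSE
  * as soon as this one resource settles the answer for the bundle, or -1 when
  * the decision depends on the replica's remaining resources.
  */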
 static int
 replica_resource_active(pe_resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 pe__bundle_active(pe_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GListPtr iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Bundle replica if found, NULL otherwise
  */
 pe_resource_t *
 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 static void
 print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
 static void
 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica id=\"%d\">\n", pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         char *id = crm_itoa(replica->offset);
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6,
                      "id", rsc->id,
                      "type", container_agent_str(bundle_data->agent_type),
                      "image", bundle_data->image,
                      "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                      "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                      "failed", pe__rsc_bool_str(rsc, pe_rsc_failed));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (print_ip) {
             out->message(out, crm_map_element_name(replica->ip->xml), options,
                          replica->ip, only_node, only_rsc);
         }
 
         if (print_child) {
             out->message(out, crm_map_element_name(replica->child->xml), options,
                          replica->child, only_node, only_rsc);
         }
 
         if (print_ctnr) {
             out->message(out, crm_map_element_name(replica->container->xml), options,
                          replica->container, only_node, only_rsc);
         }
 
         if (print_remote) {
             out->message(out, crm_map_element_name(replica->remote->xml), options,
                          replica->remote, only_node, only_rsc);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
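 /*!
  * \internal
  * \brief Output a one-line HTML summary of a bundle replica
  *
  * The line is labeled with the remote connection's printable ID (or the
  * container's, if there is no remote connection), plus the replica's IP
  * address, if any.
  */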
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, long options)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, options);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     char buffer[LINE_MAX];
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(options, pe_print_implicit) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The output messages used below require pe_print_implicit to be
              * set in order to print anything.
              */
             unsigned int new_options = options;
 
             if (!pcmk_is_set(options, pe_print_implicit)) {
                 new_options |= pe_print_implicit;
             }
 
             if (rc == pcmk_rc_no_output) {
                 pcmk__output_create_xml_node(out, "br", NULL);
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pcmk__output_xml_create_parent(out, "li", NULL);
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
                 xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
             }
             pcmk__output_create_xml_node(out, "br", NULL);
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_options, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_options, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_options, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_options, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
                                            options);
         }
 
         pcmk__output_xml_pop_parent(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
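 /*!
  * \internal
  * \brief Output a one-line text summary of a bundle replica
  *
  * The line is labeled with the remote connection's printable ID (or the
  * container's, if there is no remote connection), plus the replica's IP
  * address, if any.
  */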
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, long options)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, options);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     get_bundle_variant_data(bundle_data, rsc);
 
     CRM_ASSERT(rsc != NULL);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(options, pe_print_implicit) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The text output messages used below require pe_print_implicit to
              * be set to do anything.
              */
             unsigned int new_options = options;
 
             if (!pcmk_is_set(options, pe_print_implicit)) {
                 new_options |= pe_print_implicit;
             }
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_options, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_options, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_options, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_options, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
                                            options);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
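 /*!
  * \internal
  * \brief Print one bundle replica in legacy status format
  *
  * The replica is labeled with the remote connection's printable ID (or the
  * container's, if there is no remote connection), plus its IP address, if
  * any, and is shown on its current node.
  */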
 static void
 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 void
 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (pcmk_is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
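 /*!
  * \internal
  * \brief Free a bundle replica's members and the replica itself
  *
  * \note replica->child is not freed here; the bundle's child resource is
  *       freed once, via bundle_data->child in pe__free_bundle().
  */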
 static void
 free_bundle_replica(pe__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if (bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
 
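 /*!
  * \internal
  * \brief Get a bundle's role
  *
  * \return Always RSC_ROLE_UNKNOWN, since a bundle has no role of its own
  *         (only its members do)
  */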
 enum rsc_role_e
 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pe_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
 
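 /*!
  * \internal
  * \brief Count each bundle replica's members toward cluster totals
  *
  * \param[in] rsc  Bundle resource to count
  */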
 void
 pe__count_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pe__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
 
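 /*!
  * \internal
  * \brief Check whether a bundle should be filtered out of status displays
  *
  * \return FALSE if the bundle's printable ID is in \p only_rsc or any
  *         replica member passes its own filter check, otherwise TRUE
  */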
 gboolean
 pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
         passes = TRUE;
     } else {
         get_bundle_variant_data(bundle_data, rsc);
 
         for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 5f484ad57b..60199c78f6 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1044 +1,1106 @@
 /*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 
 void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
 
 resource_object_functions_t resource_class_functions[] = {
     {
          native_unpack,
          native_find_rsc,
          native_parameter,
          native_print,
          native_active,
          native_resource_state,
          native_location,
          native_free,
          pe__count_common,
          pe__native_is_filtered,
     },
     {
          group_unpack,
          native_find_rsc,
          native_parameter,
          group_print,
          group_active,
          group_resource_state,
          native_location,
          group_free,
          pe__count_common,
          pe__group_is_filtered,
     },
     {
          clone_unpack,
          native_find_rsc,
          native_parameter,
          clone_print,
          clone_active,
          clone_resource_state,
          native_location,
          clone_free,
          pe__count_common,
          pe__clone_is_filtered,
     },
     {
          pe__unpack_bundle,
          native_find_rsc,
          native_parameter,
          pe__print_bundle,
          pe__bundle_active,
          pe__bundle_resource_state,
          native_location,
          pe__free_bundle,
          pe__count_bundle,
          pe__bundle_is_filtered,
     }
 };
 
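 /*!
  * \internal
  * \brief Map a resource XML element name to a resource variant
  *
  * \param[in] name  XML element name
  *
  * \return Matching pe_obj_types value, or pe_unknown if none
  */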
 static enum pe_obj_types
 get_resource_type(const char *name)
 {
     if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
         return pe_native;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
         return pe_group;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
         return pe_clone;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_MASTER, pcmk__str_casei)) {
         // @COMPAT deprecated since 2.0.0
         return pe_clone;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
         return pe_container;
     }
 
     return pe_unknown;
 }
 
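 /*!
  * \internal
  * \brief Copy one name/value pair into a hash table (as a GHFunc)
  *
  * Entries already present in the target table are left unchanged.
  */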
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     add_hash_param(user_data, key, value);
 }
 
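 /*!
  * \internal
  * \brief Merge meta-attributes explicitly set on ancestors into a table
  *
  * Walk all ancestors of \p rsc, gather the meta-attributes set in their
  * original XML, and add them to \p meta_hash without overwriting entries
  * already present there.
  */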
 static void
 expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
 {
     GHashTable *parent_orig_meta = NULL;
     pe_resource_t *p = rsc->parent;
 
     if (p == NULL) {
         return;
     }
     parent_orig_meta = crm_str_table_new();
 
     /* Walk all ancestors and collect the meta-attributes explicitly set in
      * their original XML, accumulating them in the hash table. A value set
      * on a nearer ancestor takes precedence and is not overwritten.
      */
     while (p != NULL) {
         /* Build a comparison table from this ancestor's XML, including
          * values pulled in via id-ref.
          */
         pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS, rule_data,
                                    parent_orig_meta, NULL, FALSE, data_set);
         p = p->parent;
     }
 
     /* If any ancestor explicitly set meta-attributes, merge them in. */
     if (parent_orig_meta != NULL) {
         GHashTableIter iter;
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, parent_orig_meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
             /* Copy each ancestor attribute to the child resource; attributes
              * the child already has are not updated.
              */
             dup_attr(key, value, meta_hash);
         }
     }
 
     g_hash_table_destroy(parent_orig_meta);
 }
 
 void
 get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
                     pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
         .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
         .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
         const char *prop_name = (const char *) a->name;
         const char *prop_value = crm_element_value(rsc->xml, prop_name);
 
         add_hash_param(meta_hash, prop_name, prop_value);
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* Merge meta-attributes explicitly set on ancestor resources into the
      * child's table; values the child set explicitly are not overwritten.
      */
     if (rsc->parent != NULL) {
         expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
     }
 
     /* check the defaults */
     pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
                                &rule_data, meta_hash, NULL, FALSE, data_set);
 
     /* Finally, inherit the parent's fully evaluated meta-attributes, covering
      * anything not yet set explicitly or via rsc_defaults; values already in
      * the table are not overwritten.
      */
     if (rsc->parent) {
         g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
     }
 }
 
 void
 get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
                    pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
 
     } else {
         /* and finally check the defaults */
         pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
                                    &rule_data, meta_hash, NULL, FALSE, data_set);
     }
 }
 
 #if ENABLE_VERSIONED_ATTRS
 void
 pe_get_versioned_attributes(xmlNode * meta_hash, pe_resource_t * rsc,
                             pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_rule_eval_data_t rule_data = {
         .node_hash = (node == NULL)? NULL : node->details->attrs,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     pe_eval_versioned_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS,
                                  &rule_data, meta_hash, NULL);
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         pe_get_versioned_attributes(meta_hash, rsc->parent, node, data_set);
 
     } else {
         /* and finally check the defaults */
         pe_eval_versioned_attributes(data_set->input, data_set->rsc_defaults,
                                      XML_TAG_ATTR_SETS, &rule_data, meta_hash,
                                      NULL);
     }
 }
 #endif
 
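 /*!
  * \internal
  * \brief Build a hash table key ("name-role") for a template operation
  *
  * Operations with no role, or with a role of Started or Slave, are keyed
  * with the unknown role so that they compare as equivalent.
  */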
 static char *
 template_op_key(xmlNode * op)
 {
     const char *name = crm_element_value(op, "name");
     const char *role = crm_element_value(op, "role");
     char *key = NULL;
 
     if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_null_matches)
         || pcmk__str_eq(role, RSC_ROLE_SLAVE_S, pcmk__str_none)) {
         role = RSC_ROLE_UNKNOWN_S;
     }
 
     key = crm_strdup_printf("%s-%s", name, role);
     return key;
 }
 
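 /*!
  * \internal
  * \brief Expand a resource's template reference, if any
  *
  * \param[in]  xml_obj       Resource XML to check for a template reference
  * \param[out] expanded_xml  Where to store the expanded XML, if any
  * \param[in]  data_set      Cluster working set
  *
  * \return FALSE on error, otherwise TRUE (with \p *expanded_xml set if
  *         \p xml_obj references a template)
  */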
 static gboolean
 unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     xmlNode *cib_resources = NULL;
     xmlNode *template = NULL;
     xmlNode *new_xml = NULL;
     xmlNode *child_xml = NULL;
     xmlNode *rsc_ops = NULL;
     xmlNode *template_ops = NULL;
     const char *template_ref = NULL;
     const char *clone = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for template unpacking");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have an id", crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
     if (cib_resources == NULL) {
         pe_err("No resources configured");
         return FALSE;
     }
 
     template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
                               XML_ATTR_ID, template_ref);
     if (template == NULL) {
         pe_err("No template named '%s'", template_ref);
         return FALSE;
     }
 
     new_xml = copy_xml(template);
     xmlNodeSetName(new_xml, xml_obj->name);
     crm_xml_replace(new_xml, XML_ATTR_ID, id);
 
     clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
     if (clone) {
         crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
     }
 
     template_ops = find_xml_node(new_xml, "operations", FALSE);
 
     for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
          child_xml = pcmk__xe_next(child_xml)) {
         xmlNode *new_child = NULL;
 
         new_child = add_node_copy(new_xml, child_xml);
 
         if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
             rsc_ops = new_child;
         }
     }
 
     if (template_ops && rsc_ops) {
         xmlNode *op = NULL;
         GHashTable *rsc_ops_hash = g_hash_table_new_full(crm_str_hash,
                                                          g_str_equal, free,
                                                          NULL);
 
         for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             g_hash_table_insert(rsc_ops_hash, key, op);
         }
 
         for (op = pcmk__xe_first_child(template_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
                 add_node_copy(rsc_ops, op);
             }
 
             free(key);
         }
 
         if (rsc_ops_hash) {
             g_hash_table_destroy(rsc_ops_hash);
         }
 
         free_xml(template_ops);
     }
 
     /*free_xml(*expanded_xml); */
     *expanded_xml = new_xml;
 
     /* Disable multi-level templates for now */
     /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
        free_xml(*expanded_xml);
        *expanded_xml = NULL;
 
        return FALSE;
        } */
 
     return TRUE;
 }
 
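 /*!
  * \internal
  * \brief Register a resource under the tag of the template it references
  *
  * \return FALSE on error, otherwise TRUE
  */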
 static gboolean
 add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     const char *template_ref = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for processing resource list of template");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have an id", crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
         return FALSE;
     }
 
     return TRUE;
 }
 
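 /*!
  * \internal
  * \brief Check whether a resource is configured as promotable
  *
  * The deprecated master element is treated as promotable, and the
  * corresponding meta-attribute is added so that later lookups succeed.
  */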
 static bool
 detect_promotable(pe_resource_t *rsc)
 {
     const char *promotable = g_hash_table_lookup(rsc->meta,
                                                  XML_RSC_ATTR_PROMOTABLE);
 
     if (crm_is_true(promotable)) {
         return TRUE;
     }
 
     // @COMPAT deprecated since 2.0.0
     if (pcmk__str_eq(crm_element_name(rsc->xml), XML_CIB_TAG_MASTER, pcmk__str_casei)) {
         /* @TODO in some future version, pe_warn_once() here,
          *       then drop support in even later version
          */
         g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
                             strdup(XML_BOOLEAN_TRUE));
         return TRUE;
     }
     return FALSE;
 }
 
+static void
+free_params_table(gpointer data)
+{
+    g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \brief Get a table of resource parameters
+ *
+ * \param[in] rsc       Resource to query
+ * \param[in] node      Node for evaluating rules (NULL for defaults)
+ * \param[in] data_set  Cluster working set
+ *
+ * \return Hash table containing resource parameter names and values
+ *         (or NULL if \p rsc or \p data_set is NULL)
+ * \note The returned table will be destroyed when the resource is freed, so
+ *       callers should not destroy it.
+ */
+GHashTable *
+pe_rsc_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
+{
+    GHashTable *params_on_node = NULL;
+
+    /* A NULL node is used to request the resource's default parameters
+     * (not evaluated for node), but we always want something non-NULL
+     * as a hash table key.
+     */
+    const char *node_name = "";
+
+    // Sanity check
+    if ((rsc == NULL) || (data_set == NULL)) {
+        return NULL;
+    }
+    if ((node != NULL) && (node->details->uname != NULL)) {
+        node_name = node->details->uname;
+    }
+
+    // Find the parameter table for given node
+    if (rsc->parameter_cache == NULL) {
+        rsc->parameter_cache = g_hash_table_new_full(crm_strcase_hash,
+                                                     crm_strcase_equal, free,
+                                                     free_params_table);
+    } else {
+        params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
+    }
+
+    // If none exists yet, create one with parameters evaluated for node
+    if (params_on_node == NULL) {
+        params_on_node = crm_str_table_new();
+        get_rsc_attributes(params_on_node, rsc, node, data_set);
+        g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
+                            params_on_node);
+    }
+    return params_on_node;
+}
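+
+/* Usage sketch (hypothetical caller; "ip" is just an example parameter
+ * name):
+ *
+ *     GHashTable *params = pe_rsc_params(rsc, node, data_set);
+ *     const char *ip = g_hash_table_lookup(params, "ip");
+ *
+ * The returned table is cached on the resource and freed with it, so the
+ * caller must not destroy it.
+ */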
+
 gboolean
 common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
               pe_resource_t * parent, pe_working_set_t * data_set)
 {
     bool isdefault = FALSE;
     xmlNode *expanded_xml = NULL;
     xmlNode *ops = NULL;
     const char *value = NULL;
     const char *rclass = NULL; /* Look for this after any templates have been expanded */
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     bool guest_node = FALSE;
     bool remote_node = FALSE;
     bool has_versioned_params = FALSE;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     crm_log_xml_trace(xml_obj, "Processing resource input...");
 
     if (id == NULL) {
         pe_err("Must specify id tag in <resource>");
         return FALSE;
 
     } else if (rsc == NULL) {
         pe_err("Nowhere to unpack resource into");
         return FALSE;
 
     }
 
     if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
         return FALSE;
     }
 
     *rsc = calloc(1, sizeof(pe_resource_t));
     (*rsc)->cluster = data_set;
 
     if (expanded_xml) {
         crm_log_xml_trace(expanded_xml, "Expanded resource...");
         (*rsc)->xml = expanded_xml;
         (*rsc)->orig_xml = xml_obj;
 
     } else {
         (*rsc)->xml = xml_obj;
         (*rsc)->orig_xml = NULL;
     }
 
     /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
     rclass = crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS);
     (*rsc)->parent = parent;
 
     ops = find_xml_node((*rsc)->xml, "operations", FALSE);
     (*rsc)->ops_xml = expand_idref(ops, data_set->input);
 
     (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
     if ((*rsc)->variant == pe_unknown) {
         pe_err("Unknown resource type: %s", crm_element_name((*rsc)->xml));
         free(*rsc);
         return FALSE;
     }
 
-    (*rsc)->parameters = crm_str_table_new();
-
 #if ENABLE_VERSIONED_ATTRS
     (*rsc)->versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
 #endif
 
     (*rsc)->meta = crm_str_table_new();
 
     (*rsc)->allowed_nodes =
         g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
 
     (*rsc)->known_on = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
                                              free);
 
     value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
     if (value) {
         (*rsc)->id = crm_strdup_printf("%s:%s", id, value);
         add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
 
     } else {
         (*rsc)->id = strdup(id);
     }
 
     (*rsc)->fns = &resource_class_functions[(*rsc)->variant];
     pe_rsc_trace((*rsc), "Unpacking resource...");
 
     get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
-    get_rsc_attributes((*rsc)->parameters, *rsc, NULL, data_set);
+    (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
 #if ENABLE_VERSIONED_ATTRS
     pe_get_versioned_attributes((*rsc)->versioned_parameters, *rsc, NULL, data_set);
 #endif
 
     (*rsc)->flags = 0;
     pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
 
     if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__set_resource_flags(*rsc, pe_rsc_managed);
     }
 
     (*rsc)->rsc_cons = NULL;
     (*rsc)->rsc_tickets = NULL;
     (*rsc)->actions = NULL;
     (*rsc)->role = RSC_ROLE_STOPPED;
     (*rsc)->next_role = RSC_ROLE_UNKNOWN;
 
     (*rsc)->recovery_type = recovery_stop_start;
     (*rsc)->stickiness = 0;
     (*rsc)->migration_threshold = INFINITY;
     (*rsc)->failure_timeout = 0;
 
     value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
     (*rsc)->priority = crm_parse_int(value, "0");
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
     if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_notify);
     }
 
     if (xml_contains_remote_node((*rsc)->xml)) {
         (*rsc)->is_remote_node = TRUE;
         if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
             guest_node = TRUE;
         } else {
             remote_node = TRUE;
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
 #if ENABLE_VERSIONED_ATTRS
     has_versioned_params = xml_has_children((*rsc)->versioned_parameters);
 #endif
     if (crm_is_true(value) && has_versioned_params) {
         pe_rsc_trace((*rsc), "Migration is disabled for resources with versioned parameters");
     } else if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     } else if ((value == NULL) && remote_node && !has_versioned_params) {
         /* By default, we want remote nodes to be able
          * to float around the cluster without having to stop all the
          * resources within the remote-node before moving. Allowing
          * migration support enables this feature. If this ever causes
          * problems, migration support can be explicitly turned off with
          * allow-migrate=false.
          * We don't support migration for versioned resources, though. */
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_managed);
         } else {
             pe__clear_resource_flags(*rsc, pe_rsc_managed);
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
     if (crm_is_true(value)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
 
     if (pe_rsc_is_clone(uber_parent(*rsc))) {
         value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_unique);
         }
         if (detect_promotable(*rsc)) {
             pe__set_resource_flags(*rsc, pe_rsc_promotable);
         }
     } else {
         pe__set_resource_flags(*rsc, pe_rsc_unique);
     }
 
     pe_rsc_trace((*rsc), "Options for %s", (*rsc)->id);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
     if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
         (*rsc)->restart_type = pe_restart_restart;
         pe_rsc_trace((*rsc), "\tDependency restart handling: restart");
         pe_warn_once(pe_wo_restart_type,
                      "Support for restart-type is deprecated and will be removed in a future release");
 
     } else {
         (*rsc)->restart_type = pe_restart_ignore;
         pe_rsc_trace((*rsc), "\tDependency restart handling: ignore");
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
     if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
         (*rsc)->recovery_type = recovery_stop_only;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop only");
 
     } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
         (*rsc)->recovery_type = recovery_block;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
 
     } else {
         (*rsc)->recovery_type = recovery_stop_start;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start");
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->stickiness = char2score(value);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->migration_threshold = char2score(value);
         if ((*rsc)->migration_threshold < 0) {
             /* @TODO We use 1 here to preserve previous behavior, but this
              * should probably use the default (INFINITY) or 0 (to disable)
              * instead.
              */
             pe_warn_once(pe_wo_neg_threshold,
                          XML_RSC_ATTR_FAIL_STICKINESS
                          " must be non-negative, using 1 instead");
             (*rsc)->migration_threshold = 1;
         }
     }
 
     if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
         pe__set_resource_flags(*rsc, pe_rsc_fence_device);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
 
   handle_requires_pref:
     if (pcmk__str_eq(value, "nothing", pcmk__str_casei)) {
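         // Nothing to do: the resource requires neither quorum nor fencing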
 
     } else if (pcmk__str_eq(value, "quorum", pcmk__str_casei)) {
         pe__set_resource_flags(*rsc, pe_rsc_needs_quorum);
 
     } else if (pcmk__str_eq(value, "unfencing", pcmk__str_casei)) {
         if (pcmk_is_set((*rsc)->flags, pe_rsc_fence_device)) {
             pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
                               "to 'quorum' because fencing devices cannot "
                               "require unfencing", (*rsc)->id);
             value = "quorum";
             isdefault = TRUE;
             goto handle_requires_pref;
 
         } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
                               "to 'quorum' because fencing is disabled",
                               (*rsc)->id);
             value = "quorum";
             isdefault = TRUE;
             goto handle_requires_pref;
 
         } else {
             pe__set_resource_flags(*rsc, pe_rsc_needs_fencing
                                            |pe_rsc_needs_unfencing);
         }
 
     } else if (pcmk__str_eq(value, "fencing", pcmk__str_casei)) {
         pe__set_resource_flags(*rsc, pe_rsc_needs_fencing);
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("%s requires fencing but fencing is disabled",
                               (*rsc)->id);
         }
 
     } else {
         const char *orig_value = value;
 
         isdefault = TRUE;
         if (pcmk_is_set((*rsc)->flags, pe_rsc_fence_device)) {
             value = "quorum";
 
         } else if (((*rsc)->variant == pe_native)
                    && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS), PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)
                    && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_PROVIDER), "pacemaker", pcmk__str_casei)
                    && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_ATTR_TYPE), "remote", pcmk__str_casei)
             ) {
             value = "quorum";
 
         } else if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
             value = "unfencing";
 
         } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             value = "fencing";
 
         } else if (data_set->no_quorum_policy == no_quorum_ignore) {
             value = "nothing";
 
         } else {
             value = "quorum";
         }
 
         if (orig_value != NULL) {
             pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
                              "to '%s' because '%s' is not valid",
                               (*rsc)->id, value, orig_value);
         }
 
         goto handle_requires_pref;
     }
 
     pe_rsc_trace((*rsc), "\tRequired to start: %s%s", value, isdefault?" (default)":"");
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
     if (value != NULL) {
         // Stored as seconds
         (*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
     }
 
     if (remote_node) {
-        value = g_hash_table_lookup((*rsc)->parameters, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
+        GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
+
+        /* Grabbing the value now means that any rules based on node attributes
+         * will evaluate to false, so such rules should not be used with
+         * reconnect_interval.
+         *
+         * @TODO Evaluate per node before using
+         */
+        value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
         if (value) {
             /* reconnect delay works by setting failure_timeout and preventing the
              * connection from starting until the failure is cleared. */
             (*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
             /* We want to override any default failure_timeout in use when
              * remote reconnect_interval is in use.
              */
             (*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
         }
     }
 
     get_target_role(*rsc, &((*rsc)->next_role));
     pe_rsc_trace((*rsc), "\tDesired next state: %s",
                  (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
 
     if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
         return FALSE;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
         // This tag must stay exactly the same because it is tested elsewhere
         resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
     } else if (guest_node) {
         /* remote resources tied to a container resource must always be allowed
          * to opt-in to the cluster. Whether the connection resource is actually
          * allowed to be placed on a node is dependent on the container resource */
         resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
     }
 
     pe_rsc_trace((*rsc), "\tAction notification: %s",
                  pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
 
     (*rsc)->utilization = crm_str_table_new();
 
     pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
                                (*rsc)->utilization, NULL, FALSE, data_set);
 
 /* 	data_set->resources = g_list_append(data_set->resources, (*rsc)); */
 
     if (expanded_xml) {
         if (add_template_rsc(xml_obj, data_set) == FALSE) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
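 /*!
  * \brief Add a score to a node's weight in a resource's allowed nodes table
  *
  * The update is applied recursively to all of the resource's children.
  */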
 void
 common_update_score(pe_resource_t * rsc, const char *id, int score)
 {
     pe_node_t *node = NULL;
 
     node = pe_hash_table_lookup(rsc->allowed_nodes, id);
     if (node != NULL) {
         pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
         node->weight = pe__add_scores(node->weight, score);
     }
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             common_update_score(child_rsc, id, score);
         }
     }
 }
 
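 /*!
  * \brief Check whether a resource is an ancestor of another
  *
  * \param[in] child  Resource to check
  * \param[in] rsc    Potential ancestor of \p child
  *
  * \return TRUE if \p rsc is an ancestor of \p child, otherwise FALSE
  */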
 gboolean
 is_parent(pe_resource_t *child, pe_resource_t *rsc)
 {
     pe_resource_t *parent = child;
 
     if (parent == NULL || rsc == NULL) {
         return FALSE;
     }
     while (parent->parent != NULL) {
         if (parent->parent == rsc) {
             return TRUE;
         }
         parent = parent->parent;
     }
     return FALSE;
 }
 
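 /*!
  * \brief Get a resource's topmost ancestor, without crossing a bundle
  *
  * \param[in] rsc  Resource to check
  *
  * \return \p rsc's uppermost ancestor, stopping below any bundle
  *         (possibly \p rsc itself)
  */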
 pe_resource_t *
 uber_parent(pe_resource_t * rsc)
 {
     pe_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL && parent->parent->variant != pe_container) {
         parent = parent->parent;
     }
     return parent;
 }
 
 void
 common_free(pe_resource_t * rsc)
 {
     if (rsc == NULL) {
         return;
     }
 
     pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
 
     g_list_free(rsc->rsc_cons);
     g_list_free(rsc->rsc_cons_lhs);
     g_list_free(rsc->rsc_tickets);
     g_list_free(rsc->dangling_migrations);
 
-    if (rsc->parameters != NULL) {
-        g_hash_table_destroy(rsc->parameters);
+    if (rsc->parameter_cache != NULL) {
+        g_hash_table_destroy(rsc->parameter_cache);
     }
 #if ENABLE_VERSIONED_ATTRS
     if (rsc->versioned_parameters != NULL) {
         free_xml(rsc->versioned_parameters);
     }
 #endif
     if (rsc->meta != NULL) {
         g_hash_table_destroy(rsc->meta);
     }
     if (rsc->utilization != NULL) {
         g_hash_table_destroy(rsc->utilization);
     }
 
     if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
         free_xml(rsc->orig_xml);
         rsc->orig_xml = NULL;
 
         /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
     } else if (rsc->orig_xml) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
     }
     if (rsc->running_on) {
         g_list_free(rsc->running_on);
         rsc->running_on = NULL;
     }
     if (rsc->known_on) {
         g_hash_table_destroy(rsc->known_on);
         rsc->known_on = NULL;
     }
     if (rsc->actions) {
         g_list_free(rsc->actions);
         rsc->actions = NULL;
     }
     if (rsc->allowed_nodes) {
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = NULL;
     }
     g_list_free(rsc->fillers);
     g_list_free(rsc->rsc_location);
     pe_rsc_trace(rsc, "Resource freed");
     free(rsc->id);
     free(rsc->clone_name);
     free(rsc->allocated_to);
     free(rsc->variant_opaque);
     free(rsc->pending_task);
     free(rsc);
 }
 
 /*!
  * \internal
  * \brief Find a node (and optionally count all) where resource is active
  *
  * \param[in]  rsc          Resource to check
  * \param[out] count_all    If not NULL, will be set to count of active nodes
  * \param[out] count_clean  If not NULL, will be set to count of clean nodes
  *
  * \return An active node (or NULL if resource is not active anywhere)
  *
  * \note The order of preference is: an active node that is the resource's
  *       partial migration source; if the resource's "requires" is "quorum" or
  *       "nothing", the first active node in the list that is clean and online;
  *       the first active node in the list.
  */
 pe_node_t *
 pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all,
                    unsigned int *count_clean)
 {
     pe_node_t *active = NULL;
     pe_node_t *node = NULL;
     bool keep_looking = FALSE;
     bool is_happy = FALSE;
 
     if (count_all) {
         *count_all = 0;
     }
     if (count_clean) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
 
     for (GList *node_iter = rsc->running_on; node_iter != NULL;
          node_iter = node_iter->next) {
 
         node = node_iter->data;
         keep_looking = FALSE;
 
         is_happy = node->details->online && !node->details->unclean;
 
         if (count_all) {
             ++*count_all;
         }
         if (count_clean && is_happy) {
             ++*count_clean;
         }
         if (count_all || count_clean) {
             // If we're counting, we need to go through entire list
             keep_looking = TRUE;
         }
 
         if (rsc->partial_migration_source != NULL) {
             if (node->details == rsc->partial_migration_source->details) {
                 // This is the migration source
                 active = node;
             } else {
                 keep_looking = TRUE;
             }
         } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
             if (is_happy && (!active || !active->details->online
                              || active->details->unclean)) {
                 // This is the first clean node
                 active = node;
             } else {
                 keep_looking = TRUE;
             }
         }
         if (active == NULL) {
             // This is first node in list
             active = node;
         }
 
         if (keep_looking == FALSE) {
             // Don't waste time iterating if we don't have to
             break;
         }
     }
     return active;
 }
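
 /* Illustrative sketch (not part of this change): how a caller might use
  * pe__find_active_on() to obtain both counts along with a preferred node.
  * The helper name below is hypothetical.
  */
 #if 0
 static void
 example_trace_activity(const pe_resource_t *rsc)
 {
     unsigned int count_all = 0;
     unsigned int count_clean = 0;
     pe_node_t *node = pe__find_active_on(rsc, &count_all, &count_clean);

     if (node != NULL) {
         crm_trace("%s is active on %u node(s) (%u clean), for example %s",
                   rsc->id, count_all, count_clean, node->details->uname);
     }
 }
 #endif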
 
 /*!
  * \internal
  * \brief Find and count active nodes according to "requires"
  *
  * \param[in]  rsc    Resource to check
  * \param[out] count  If not NULL, will be set to count of active nodes
  *
  * \return An active node (or NULL if resource is not active anywhere)
  *
  * \note This is a convenience wrapper for pe__find_active_on() where the count
  *       of all active nodes or only clean active nodes is desired according to
  *       the "requires" meta-attribute.
  */
 pe_node_t *
 pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
 {
     if (rsc && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         return pe__find_active_on(rsc, NULL, count);
     }
     return pe__find_active_on(rsc, count, NULL);
 }
 
 void
 pe__count_common(pe_resource_t *rsc)
 {
     if (rsc->children != NULL) {
         for (GList *item = rsc->children; item != NULL; item = item->next) {
             ((pe_resource_t *) item->data)->fns->count(item->data);
         }
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
                || (rsc->role > RSC_ROLE_STOPPED)) {
         rsc->cluster->ninstances++;
         if (pe__resource_is_disabled(rsc)) {
             rsc->cluster->disabled_resources++;
         }
         if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
             rsc->cluster->blocked_resources++;
         }
     }
 }
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 193be170aa..95c3da905b 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1376 +1,1356 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/complex.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <pe_status_private.h>
 
 #define VARIANT_NATIVE 1
 #include "./variant.h"
 
 /*!
  * \internal
  * \brief Check whether a resource is active on multiple nodes
  */
 static bool
 is_multiply_active(pe_resource_t *rsc)
 {
     unsigned int count = 0;
 
     if (rsc->variant == pe_native) {
         pe__find_active_requires(rsc, &count);
     }
     return count > 1;
 }
 
 static void
 native_priority_to_node(pe_resource_t * rsc, pe_node_t * node)
 {
     int priority = 0;
 
     if (rsc->priority == 0) {
         return;
     }
 
     if (rsc->role == RSC_ROLE_MASTER) {
         // Promoted instance takes base priority + 1
         priority = rsc->priority + 1;
 
     } else {
         priority = rsc->priority;
     }
 
     node->details->priority += priority;
     pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s)",
                  node->details->uname, node->details->priority,
                  rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
                  rsc->id, rsc->priority,
                  rsc->role == RSC_ROLE_MASTER ? " + 1" : "");
 
     /* Priority of a resource running on a guest node is added to the cluster
      * node as well. */
     if (node->details->remote_rsc
         && node->details->remote_rsc->container) {
         GListPtr gIter = node->details->remote_rsc->container->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *a_node = gIter->data;
 
             a_node->details->priority += priority;
             pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s) "
                          "from guest node '%s'",
                          a_node->details->uname, a_node->details->priority,
                          rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
                          rsc->id, rsc->priority,
                          rsc->role == RSC_ROLE_MASTER ? " + 1" : "",
                          node->details->uname);
         }
     }
 }
 
 void
 native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = rsc->running_on;
 
     CRM_CHECK(node != NULL, return);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         CRM_CHECK(a_node != NULL, return);
         if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
             return;
         }
     }
 
     pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname,
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
 
     rsc->running_on = g_list_append(rsc->running_on, node);
     if (rsc->variant == pe_native) {
         node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
 
         native_priority_to_node(rsc, node);
     }
 
     if (rsc->variant == pe_native && node->details->maintenance) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_resource_t *p = rsc->parent;
 
         pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
         resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
 
         while (p && node->details->online) {
             /* add without the additional location constraint */
             p->running_on = g_list_append(p->running_on, node);
             p = p->parent;
         }
         return;
     }
 
     if (is_multiply_active(rsc)) {
         switch (rsc->recovery_type) {
             case recovery_stop_only:
                 {
                     GHashTableIter gIter;
                     pe_node_t *local_node = NULL;
 
                     /* make sure it doesn't come up again */
                     if (rsc->allowed_nodes != NULL) {
                         g_hash_table_destroy(rsc->allowed_nodes);
                     }
                     rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
                     g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
                     while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
                         local_node->weight = -INFINITY;
                     }
                 }
                 break;
             case recovery_stop_start:
                 break;
             case recovery_block:
                 pe__clear_resource_flags(rsc, pe_rsc_managed);
                 pe__set_resource_flags(rsc, pe_rsc_block);
 
                 /* If the resource belongs to a group or bundle configured with
                  * multiple-active=block, block the entire entity.
                  */
                 if (rsc->parent
                     && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
                     && rsc->parent->recovery_type == recovery_block) {
                     GListPtr gIter = rsc->parent->children;
 
                     for (; gIter != NULL; gIter = gIter->next) {
                         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
                         pe__clear_resource_flags(child, pe_rsc_managed);
                         pe__set_resource_flags(child, pe_rsc_block);
                     }
                 }
                 break;
         }
         crm_debug("%s is active on multiple nodes including %s: %s",
                   rsc->id, node->details->uname,
                   recovery2text(rsc->recovery_type));
 
     } else {
         pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname);
     }
 
     if (rsc->parent != NULL) {
         native_add_running(rsc->parent, node, data_set);
     }
 }
 
 static void
 recursive_clear_unique(pe_resource_t *rsc)
 {
     pe__clear_resource_flags(rsc, pe_rsc_unique);
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
 
     for (GList *child = rsc->children; child != NULL; child = child->next) {
         recursive_clear_unique((pe_resource_t *) child->data);
     }
 }
 
 gboolean
 native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_resource_t *parent = uber_parent(rsc);
     native_variant_data_t *native_data = NULL;
     const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     uint32_t ra_caps = pcmk_get_ra_caps(standard);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     native_data = calloc(1, sizeof(native_variant_data_t));
     rsc->variant_opaque = native_data;
 
     // Only some agent standards support unique and promotable clones
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
         && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
 
         /* @COMPAT We should probably reject this situation as an error (as we
          * do for promotable below) rather than warn and convert, but that would
          * be a backward-incompatible change that we should probably do with a
          * transform at a schema major version bump.
          */
         pe__force_anon(standard, parent, rsc->id, data_set);
 
         /* Clear globally-unique on the parent and all its descendents unpacked
          * so far (clearing the parent should make any future children unpacking
          * correct). We have to clear this resource explicitly because it isn't
          * hooked into the parent's children yet.
          */
         recursive_clear_unique(parent);
         recursive_clear_unique(rsc);
     }
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
         && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
 
         pe_err("Resource %s is of type %s and therefore "
                "cannot be used as a promotable clone resource",
                rsc->id, standard);
         return FALSE;
     }
     return TRUE;
 }
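
 /* Illustrative sketch (not part of this change): the capability test used
  * above, factored into a hypothetical helper for readability.
  */
 #if 0
 static bool
 example_supports_promotable(const char *standard)
 {
     return pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_promotable);
 }
 #endif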
 
 static bool
 rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
 {
     pe_rsc_trace(rsc, "Checking whether %s is on %s",
                  rsc->id, node->details->uname);
 
     if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
 
         for (GListPtr iter = rsc->running_on; iter; iter = iter->next) {
             pe_node_t *loc = (pe_node_t *) iter->data;
 
             if (loc->details == node->details) {
                 return TRUE;
             }
         }
 
     } else if (pcmk_is_set(flags, pe_find_inactive)
                && (rsc->running_on == NULL)) {
         return TRUE;
 
     } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
                && (rsc->allocated_to->details == node->details)) {
         return TRUE;
     }
     return FALSE;
 }
 
 pe_resource_t *
 native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
                 int flags)
 {
     bool match = FALSE;
     pe_resource_t *result = NULL;
 
     CRM_CHECK(id && rsc && rsc->id, return NULL);
 
     if (flags & pe_find_clone) {
         const char *rid = ID(rsc->xml);
 
         if (!pe_rsc_is_clone(uber_parent(rsc))) {
             match = FALSE;
 
         } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_casei)) {
             match = TRUE;
         }
 
     } else if (!strcmp(id, rsc->id)) {
         match = TRUE;
 
     } else if (pcmk_is_set(flags, pe_find_renamed)
                && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
         match = TRUE;
 
     } else if (pcmk_is_set(flags, pe_find_any)
                || (pcmk_is_set(flags, pe_find_anon)
                    && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
         match = pe_base_name_eq(rsc, id);
     }
 
     if (match && on_node) {
         bool match_node = rsc_is_on_node(rsc, on_node, flags);
 
         if (match_node == FALSE) {
             match = FALSE;
         }
     }
 
     if (match) {
         return rsc;
     }
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         result = rsc->fns->find_rsc(child, id, on_node, flags);
         if (result) {
             return result;
         }
     }
     return NULL;
 }
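
 /* Illustrative sketch (not part of this change): finding an anonymous clone
  * instance by base name, preferring where it is currently running and falling
  * back to where it has been assigned. The wrapper name is hypothetical.
  */
 #if 0
 static pe_resource_t *
 example_find_anon_instance(pe_resource_t *clone, const char *base_name,
                            const pe_node_t *node)
 {
     pe_resource_t *match = native_find_rsc(clone, base_name, node,
                                            pe_find_anon|pe_find_current);

     if (match == NULL) {
         // Without pe_find_current, rsc_is_on_node() checks allocated_to
         match = native_find_rsc(clone, base_name, node, pe_find_anon);
     }
     return match;
 }
 #endif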
 
+// The "create" argument is ignored; parameters now come from pe_rsc_params()
 char *
 native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                  pe_working_set_t * data_set)
 {
     char *value_copy = NULL;
     const char *value = NULL;
-    GHashTable *hash = NULL;
-    GHashTable *local_hash = NULL;
+    GHashTable *params = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
     CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
 
     pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
-
-    if (create || g_hash_table_size(rsc->parameters) == 0) {
-        if (node != NULL) {
-            pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname);
-        } else {
-            pe_rsc_trace(rsc, "Creating default hash");
-        }
-
-        local_hash = crm_str_table_new();
-
-        get_rsc_attributes(local_hash, rsc, node, data_set);
-
-        hash = local_hash;
-    } else {
-        hash = rsc->parameters;
-    }
-
-    value = g_hash_table_lookup(hash, name);
+    params = pe_rsc_params(rsc, node, data_set);
+    value = g_hash_table_lookup(params, name);
     if (value == NULL) {
         /* try meta attributes instead */
         value = g_hash_table_lookup(rsc->meta, name);
     }
-
     if (value != NULL) {
         value_copy = strdup(value);
     }
-    if (local_hash != NULL) {
-        g_hash_table_destroy(local_hash);
-    }
     return value_copy;
 }
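
 /* Illustrative sketch (not part of this change): with pe_rsc_params() as used
  * above, a node-specific parameter lookup reduces to the pattern below. The
  * returned table appears to be cached on the resource (see parameter_cache in
  * common_free()), so the caller must not free it. The helper is hypothetical.
  */
 #if 0
 static const char *
 example_lookup_param(pe_resource_t *rsc, pe_node_t *node, const char *name,
                      pe_working_set_t *data_set)
 {
     GHashTable *params = pe_rsc_params(rsc, node, data_set);

     return g_hash_table_lookup(params, name);
 }
 #endif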
 
 gboolean
 native_active(pe_resource_t * rsc, gboolean all)
 {
     for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         if (a_node->details->unclean) {
             pe_rsc_trace(rsc, "Resource %s: node %s is unclean",
                          rsc->id, a_node->details->uname);
             return TRUE;
         } else if (a_node->details->online == FALSE) {
             pe_rsc_trace(rsc, "Resource %s: node %s is offline",
                          rsc->id, a_node->details->uname);
         } else {
             pe_rsc_trace(rsc, "Resource %s active on %s",
                          rsc->id, a_node->details->uname);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 struct print_data_s {
     long options;
     void *print_data;
 };
 
 static const char *
 native_pending_state(pe_resource_t * rsc)
 {
     const char *pending_state = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
         pending_state = "Starting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         pending_state = "Stopping";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         /* migrate_from is the second half of a migration, so the resource
          * is still shown as "Migrating"
          */
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
         pending_state = "Promoting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
         pending_state = "Demoting";
     }
 
     return pending_state;
 }
 
 static const char *
 native_pending_task(pe_resource_t * rsc)
 {
     const char *pending_task = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         pending_task = "Monitoring";
 
     /* Pending probes are not printed, even if pending
      * operations are requested. If someone ever requests that
      * behavior, uncomment this and the corresponding part of
      * unpack.c:unpack_rsc_op().
      */
     /*
     } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
         pending_task = "Checking";
     */
     }
 
     return pending_task;
 }
 
 static enum rsc_role_e
 native_displayable_role(pe_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->role;
 
     if ((role == RSC_ROLE_STARTED)
         && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
 
         role = RSC_ROLE_SLAVE;
     }
     return role;
 }
 
 static const char *
 native_displayable_state(pe_resource_t *rsc, long options)
 {
     const char *rsc_state = NULL;
 
     if (options & pe_print_pending) {
         rsc_state = native_pending_state(rsc);
     }
     if (rsc_state == NULL) {
         rsc_state = role2text(native_displayable_role(rsc));
     }
     return rsc_state;
 }
 
 static void
 native_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, options);
     const char *target_role = NULL;
 
     /* resource information. */
     status_print("%s<resource ", pre_text);
     status_print("id=\"%s\" ", rsc_printable_id(rsc));
     status_print("resource_agent=\"%s%s%s:%s\" ",
                  class,
                  prov ? "::" : "", prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     status_print("role=\"%s\" ", rsc_state);
     if (rsc->meta) {
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
     status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
     status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
 
     if (options & pe_print_pending) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             status_print("pending=\"%s\" ", pending_task);
         }
     }
 
     /* print out the nodes this resource is running on */
     if (options & pe_print_rsconly) {
         status_print("/>\n");
         /* do nothing */
     } else if (rsc->running_on != NULL) {
         GListPtr gIter = rsc->running_on;
 
         status_print(">\n");
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             status_print("%s    <node name=\"%s\" id=\"%s\" cached=\"%s\"/>\n", pre_text,
                          node->details->uname, node->details->id,
                          pcmk__btoa(node->details->online == FALSE));
         }
         status_print("%s</resource>\n", pre_text);
     } else {
         status_print("/>\n");
     }
 }
 
 // Append a flag to resource description string's flags list
 static bool
 add_output_flag(GString *s, const char *flag_desc, bool have_flags)
 {
     g_string_append(s, (have_flags? ", " : " ("));
     g_string_append(s, flag_desc);
     return true;
 }
 
 // Append a node name to resource description string's node list
 static bool
 add_output_node(GString *s, const char *node, bool have_nodes)
 {
     g_string_append(s, (have_nodes? " " : " [ "));
     g_string_append(s, node);
     return true;
 }
 
 /*!
  * \internal
  * \brief Create a string description of a resource
  *
  * \param[in] rsc          Resource to describe
  * \param[in] name         Desired identifier for the resource
  * \param[in] node         If not NULL, node that resource is "on"
  * \param[in] options      Bitmask of pe_print_*
  * \param[in] target_role  Resource's target role
  * \param[in] show_nodes   Whether to display nodes when multiply active
  *
  * \return Newly allocated string description of resource
  * \note Caller must free the result with g_free().
  */
 gchar *
 pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
                            long options, const char *target_role, bool show_nodes)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *provider = NULL;
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     gchar *retval = NULL;
     GString *outstr = NULL;
     bool have_flags = false;
 
     if (rsc->variant != pe_native) {
         return NULL;
     }
 
     CRM_CHECK(name != NULL, name = "unknown");
     CRM_CHECK(kind != NULL, kind = "unknown");
     CRM_CHECK(class != NULL, class = "unknown");
 
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
         provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     }
 
     if ((node == NULL) && (rsc->lock_node != NULL)) {
         node = rsc->lock_node;
     }
     if (pcmk_is_set(options, pe_print_rsconly)
         || pcmk__list_of_multiple(rsc->running_on)) {
         node = NULL;
     }
 
     // We need a string of at least this size
     outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind)
                                 + (provider? (strlen(provider) + 2) : 0)
                                 + (node? strlen(node->details->uname) + 1 : 0)
                                 + 11);
 
     // Resource name and agent
     g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class,
                     /* @COMPAT This should be a single ':' (see CLBZ#5395) but
                      * to avoid breaking anything relying on it, we're keeping
                      * it like this until the next minor version bump.
                      */
                     (provider? "::" : ""), (provider? provider : ""), kind);
 
     // State on node
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         g_string_append(outstr, " ORPHANED");
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         enum rsc_role_e role = native_displayable_role(rsc);
 
         if (role > RSC_ROLE_SLAVE) {
             g_string_append_printf(outstr, " FAILED %s", role2text(role));
         } else {
             g_string_append(outstr, " FAILED");
         }
     } else {
         g_string_append_printf(outstr, " %s", native_displayable_state(rsc, options));
     }
     if (node) {
         g_string_append_printf(outstr, " %s", node->details->uname);
     }
 
     // Flags, as: (<flag> [...])
     if (node && !(node->details->online) && node->details->unclean) {
         have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
     }
     if (node && (node == rsc->lock_node)) {
         have_flags = add_output_flag(outstr, "LOCKED", have_flags);
     }
     if (pcmk_is_set(options, pe_print_pending)) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             have_flags = add_output_flag(outstr, pending_task, have_flags);
         }
     }
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         /* Only show target role if it limits our abilities (i.e. ignore
          * Started, as it is the default anyways, and doesn't prevent the
          * resource from becoming Master).
          */
         if (target_role_e == RSC_ROLE_STOPPED) {
             have_flags = add_output_flag(outstr, "disabled", have_flags);
 
         } else if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
                    && target_role_e == RSC_ROLE_SLAVE) {
             have_flags = add_output_flag(outstr, "target-role:", have_flags);
             g_string_append(outstr, target_role);
         }
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         have_flags = add_output_flag(outstr, "blocked", have_flags);
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         have_flags = add_output_flag(outstr, "unmanaged", have_flags);
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         have_flags = add_output_flag(outstr, "failure ignored", have_flags);
     }
     if (have_flags) {
         g_string_append(outstr, ")");
     }
 
     // User-supplied description
     if (pcmk_is_set(options, pe_print_rsconly)
         || pcmk__list_of_multiple(rsc->running_on)) {
         const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
 
         if (desc) {
             g_string_append_printf(outstr, " %s", desc);
         }
     }
 
     if (show_nodes && !pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
         bool have_nodes = false;
 
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pe_node_t *n = (pe_node_t *) iter->data;
 
             have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
         }
         if (have_nodes) {
             g_string_append(outstr, " ]");
         }
     }
 
     retval = outstr->str;
     g_string_free(outstr, FALSE);
     return retval;
 }
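
 /* Illustrative sketch (not part of this change): per the \note above, the
  * returned description is owned by the caller and must be freed with
  * g_free(). The helper name is hypothetical.
  */
 #if 0
 static void
 example_trace_description(pe_resource_t *rsc, pe_node_t *node, long options)
 {
     gchar *s = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
                                           options, NULL, false);

     crm_trace("%s", s);
     g_free(s);
 }
 #endif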
 
 int
 pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc,
                        const char *name, pe_node_t *node, long options)
 {
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *target_role = NULL;
 
     xmlNodePtr list_node = NULL;
     const char *cl = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     CRM_ASSERT(kind != NULL);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         cl = "rsc-managed";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         cl = "rsc-failed";
 
     } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
         cl = "rsc-failed";
 
     } else if (pcmk__list_of_multiple(rsc->running_on)) {
         cl = "rsc-multiple";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         cl = "rsc-failure-ignored";
 
     } else {
         cl = "rsc-ok";
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, options,
                                               target_role, true);
 
         list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
         pcmk_create_html_node(list_node, "span", NULL, cl, s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 int
 pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc,
                        const char *name, pe_node_t *node, long options)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, options,
                                               target_role, true);
 
         out->list_item(out, NULL, "%s", s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 void
 common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta,
                                                       XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     if ((pre_text == NULL) && (options & pe_print_printf)) {
         pre_text = " ";
     }
 
     if (options & pe_print_html) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             status_print("<font color=\"yellow\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             status_print("<font color=\"red\">");
 
         } else if (rsc->running_on == NULL) {
             status_print("<font color=\"red\">");
 
         } else if (pcmk__list_of_multiple(rsc->running_on)) {
             status_print("<font color=\"orange\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
             status_print("<font color=\"yellow\">");
 
         } else {
             status_print("<font color=\"green\">");
         }
     }
 
     {
         gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
                                                        target_role, false);
         status_print("%s%s", (pre_text? pre_text : ""), resource_s);
         g_free(resource_s);
     }
 
 #if CURSES_ENABLED
     if (pcmk_is_set(options, pe_print_ncurses)
         && !pcmk_is_set(options, pe_print_rsconly)
         && !pcmk__list_of_multiple(rsc->running_on)) {
         /* coverity[negative_returns] False positive */
         move(-1, 0);
     }
 #endif
 
     if (pcmk_is_set(options, pe_print_html)) {
         status_print(" </font> ");
     }
 
     if (!pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
 
         GListPtr gIter = rsc->running_on;
         int counter = 0;
 
         if (options & pe_print_html) {
             status_print("<ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print("[");
         }
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *n = (pe_node_t *) gIter->data;
 
             counter++;
 
             if (options & pe_print_html) {
                 status_print("<li>\n%s", n->details->uname);
 
             } else if ((options & pe_print_printf)
                        || (options & pe_print_ncurses)) {
                 status_print(" %s", n->details->uname);
 
             } else if ((options & pe_print_log)) {
                 status_print("\t%d : %s", counter, n->details->uname);
 
             } else {
                 status_print("%s", n->details->uname);
             }
             if (options & pe_print_html) {
                 status_print("</li>\n");
 
             }
         }
 
         if (options & pe_print_html) {
             status_print("</ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print(" ]");
         }
     }
 
     if (options & pe_print_html) {
         status_print("<br/>\n");
     } else if (options & pe_print_suppres_nl) {
         /* nothing */
     } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
         status_print("\n");
     }
 }
 
 void
 native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     pe_node_t *node = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     node = pe__current_node(rsc);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
 
     common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, options);
 
     long is_print_pending = options & pe_print_pending;
 
     char ra_name[LINE_MAX];
     char *nodes_running_on = NULL;
     int rc = pcmk_rc_no_output;
     const char *target_role = NULL;
 
     if (rsc->meta != NULL) {
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     /* resource information. */
     sprintf(ra_name, "%s%s%s:%s", class, prov ? "::" : "", prov ? prov : "",
             crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     nodes_running_on = crm_itoa(g_list_length(rsc->running_on));
 
     rc = pe__name_and_nvpairs_xml(out, true, "resource", 12,
              "id", rsc_printable_id(rsc),
              "resource_agent", ra_name,
              "role", rsc_state,
              "target_role", target_role,
              "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
              "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
              "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
              "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
              "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
              "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
              "nodes_running_on", nodes_running_on,
              "pending", (is_print_pending? native_pending_task(rsc) : NULL));
     free(nodes_running_on);
 
     CRM_ASSERT(rc == pcmk_rc_ok);
 
     if (rsc->running_on != NULL) {
         GList *gIter = rsc->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
                      "name", node->details->uname,
                      "id", node->details->id,
                      "cached", pcmk__btoa(node->details->online));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
     }
 
     pcmk__output_xml_pop_parent(out);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe_node_t *node = pe__current_node(rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, options);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe_node_t *node = pe__current_node(rsc);
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, options);
 }
 
 void
 native_free(pe_resource_t * rsc)
 {
     pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
     common_free(rsc);
 }
 
 enum rsc_role_e
 native_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e role = rsc->next_role;
 
     if (current) {
         role = rsc->role;
     }
     pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
     return role;
 }
 
 /*!
  * \internal
  * \brief List nodes where a resource (or any of its children) is
  *
  * \param[in]  rsc      Resource to check
  * \param[out] list     List to add result to
  * \param[in]  current  0 = where known, 1 = running, 2 = running or pending
  *
  * \return If list contains only one node, that node
  */
 pe_node_t *
 native_location(const pe_resource_t *rsc, GList **list, int current)
 {
     pe_node_t *one = NULL;
     GListPtr result = NULL;
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             child->fns->location(child, &result, current);
         }
 
     } else if (current) {
 
         if (rsc->running_on) {
             result = g_list_copy(rsc->running_on);
         }
         if ((current == 2) && rsc->pending_node
             && !pe_find_node_id(result, rsc->pending_node->details->id)) {
                 result = g_list_append(result, rsc->pending_node);
         }
 
     } else if (current == FALSE && rsc->allocated_to) {
         result = g_list_append(NULL, rsc->allocated_to);
     }
 
     if (result && (result->next == NULL)) {
         one = result->data;
     }
 
     if (list) {
         GListPtr gIter = result;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
                 *list = g_list_append(*list, node);
             }
         }
     }
 
     g_list_free(result);
     return one;
 }
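
 /* Illustrative sketch (not part of this change): collecting every node where
  * a resource is running or pending (current == 2), per the doc comment above.
  * The helper name is hypothetical.
  */
 #if 0
 static void
 example_trace_locations(pe_resource_t *rsc)
 {
     GList *nodes = NULL;
     pe_node_t *only = native_location(rsc, &nodes, 2);

     if (only != NULL) {
         crm_trace("%s is on exactly one node: %s",
                   rsc->id, only->details->uname);
     }
     g_list_free(nodes);
 }
 #endif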
 
 static void
 get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table)
 {
     GListPtr gIter = rsc_list;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
         int offset = 0;
         char buffer[LINE_MAX];
 
         int *rsc_counter = NULL;
         int *active_counter = NULL;
 
         if (rsc->variant != pe_native) {
             continue;
         }
 
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
         if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
             const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
             offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
         }
         offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
         CRM_LOG_ASSERT(offset > 0);
 
         if (rsc_table) {
             rsc_counter = g_hash_table_lookup(rsc_table, buffer);
             if (rsc_counter == NULL) {
                 rsc_counter = calloc(1, sizeof(int));
                 *rsc_counter = 0;
                 g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
             }
             (*rsc_counter)++;
         }
 
         if (active_table) {
             GListPtr gIter2 = rsc->running_on;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 pe_node_t *node = (pe_node_t *) gIter2->data;
                 GHashTable *node_table = NULL;
 
                 if (node->details->unclean == FALSE && node->details->online == FALSE) {
                     continue;
                 }
 
                 node_table = g_hash_table_lookup(active_table, node->details->uname);
                 if (node_table == NULL) {
                     node_table = crm_str_table_new();
                     g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                 }
 
                 active_counter = g_hash_table_lookup(node_table, buffer);
                 if (active_counter == NULL) {
                     active_counter = calloc(1, sizeof(int));
                     *active_counter = 0;
                     g_hash_table_insert(node_table, strdup(buffer), active_counter);
                 }
                 (*active_counter)++;
             }
         }
     }
 }
 
 static void
 destroy_node_table(gpointer data)
 {
     GHashTable *node_table = data;
 
     if (node_table) {
         g_hash_table_destroy(node_table);
     }
 }
 
 void
 print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options,
                  void *print_data, gboolean print_all)
 {
     GHashTable *rsc_table = crm_str_table_new();
     GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                      free, destroy_node_table);
     GHashTableIter hash_iter;
     char *type = NULL;
     int *rsc_counter = NULL;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     g_hash_table_iter_init(&hash_iter, rsc_table);
     while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             if (print_all) {
                 status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0,
                              rsc_counter ? *rsc_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             }
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
 
         if (print_all && active_counter_all == 0) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                          active_counter_all,
                          rsc_counter ? *rsc_counter : 0, type);
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
 }
 
 int
 pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all)
 {
     GHashTable *rsc_table = crm_str_table_new();
     GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                      free, destroy_node_table);
     GListPtr sorted_rscs;
     int rc = pcmk_rc_no_output;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     /* Sort the rsc_table keys, so that output order stays consistent
      * between systems.
      */
     sorted_rscs = g_hash_table_get_keys(rsc_table);
     sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
 
     for (GListPtr gIter = sorted_rscs; gIter; gIter = gIter->next) {
         char *type = (char *) gIter->data;
         int *rsc_counter = g_hash_table_lookup(rsc_table, type);
 
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (print_all) {
                 out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                                *active_counter,
                                rsc_counter ? *rsc_counter : 0, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                                *active_counter, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             }
 
             rc = pcmk_rc_ok;
         }
 
         if (print_all && active_counter_all == 0) {
             out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                            active_counter_all,
                            rsc_counter ? *rsc_counter : 0, type);
             rc = pcmk_rc_ok;
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
     if (sorted_rscs) {
         g_list_free(sorted_rscs);
     }
 
     return rc;
 }
 
 gboolean
 pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
         pcmk__str_in_list(only_rsc, rsc->id)) {
         return FALSE;
     } else if (check_parent) {
         pe_resource_t *up = uber_parent(rsc);
 
         if (pe_rsc_is_bundled(rsc)) {
             return up->parent->fns->is_filtered(up->parent, only_rsc, FALSE);
         } else {
             return up->fns->is_filtered(up, only_rsc, FALSE);
         }
     }
 
     return TRUE;
 }
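
 /* Illustrative sketch (not part of this change): note the inverted sense of
  * the return value, which is easy to misread at call sites. The wrapper name
  * is hypothetical.
  */
 #if 0
 static gboolean
 example_is_shown(pe_resource_t *rsc, GListPtr only_rsc)
 {
     // TRUE from is_filtered means the resource should be hidden
     return !pe__native_is_filtered(rsc, only_rsc, TRUE);
 }
 #endif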
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index f55c896420..2066a53e6f 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,568 +1,566 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/pengine/internal.h>
 #include "pe_status_private.h"
 
 /*!
  * \internal
  * \brief Free an operation digest cache entry
  *
  * \param[in] ptr  Pointer to cache entry to free
  *
  * \note The argument is a gpointer so this can be used as a hash table
  *       free function.
  */
 void
 pe__free_digests(gpointer ptr)
 {
     op_digest_cache_t *data = ptr;
 
     if (data != NULL) {
         free_xml(data->params_all);
         free_xml(data->params_secure);
         free_xml(data->params_restart);
 
         free(data->digest_all_calc);
         free(data->digest_restart_calc);
         free(data->digest_secure_calc);
 
         free(data);
     }
 }
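
 /* Illustrative sketch (not part of this change): because the argument is a
  * gpointer, pe__free_digests() can serve directly as a GHashTable value
  * destructor, as the \note above suggests. The constructor is hypothetical.
  */
 #if 0
 static GHashTable *
 example_new_digest_cache(void)
 {
     return g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                  pe__free_digests);
 }
 #endif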
 
 // Return true if XML attribute name is substring of a given string
 static bool
 attr_in_string(xmlAttrPtr a, void *user_data)
 {
     bool filter = false;
     char *name = crm_strdup_printf(" %s ", (const char *) a->name);
 
     if (strstr((const char *) user_data, name) == NULL) {
         crm_trace("Filtering %s (not found in '%s')",
                   (const char *) a->name, (const char *) user_data);
         filter = true;
     }
     free(name);
     return filter;
 }
 
 // Return true if XML attribute name is not substring of a given string
 static bool
 attr_not_in_string(xmlAttrPtr a, void *user_data)
 {
     bool filter = false;
     char *name = crm_strdup_printf(" %s ", (const char *) a->name);
 
     if (strstr((const char *) user_data, name) != NULL) {
         crm_trace("Filtering %s (found in '%s')",
                   (const char *) a->name, (const char *) user_data);
         filter = true;
     }
     free(name);
     return filter;
 }
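
 /* Illustrative sketch (not part of this change): how the two filters above
  * pair with pcmk__xe_remove_matching_attrs(), mirroring the calls in
  * calculate_secure_digest() and calculate_restart_digest() below. The helper
  * name is hypothetical; the list is space-separated attribute names.
  */
 #if 0
 static void
 example_keep_only_listed(xmlNode *params, const char *space_separated_names)
 {
     // Drop every attribute whose name is not in the given list
     pcmk__xe_remove_matching_attrs(params, attr_in_string,
                                    (void *) space_separated_names);
 }
 #endif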
 
 #if ENABLE_VERSIONED_ATTRS
 static void
 append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
 {
     GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
     char *key = NULL;
     char *value = NULL;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
         crm_xml_add(params, key, value);
     }
     g_hash_table_destroy(hash);
 }
 
 static void
 append_all_versioned_params(pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, xmlNode *xml_op,
                             op_digest_cache_t *data, pe_working_set_t *data_set)
 {
     const char *ra_version = NULL;
     xmlNode *local_versioned_params = NULL;
     pe_rsc_action_details_t *details = pe_rsc_action_details(action);
 
     local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
     pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
     if (xml_op != NULL) {
         ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
     }
     append_versioned_params(local_versioned_params, ra_version,
                             data->params_all);
     append_versioned_params(rsc->versioned_parameters, ra_version,
                             data->params_all);
     append_versioned_params(details->versioned_parameters, ra_version,
                             data->params_all);
     free_xml(local_versioned_params);
 }
 #endif
 
 /*!
  * \internal
  * \brief Add digest of all parameters to a digest cache entry
  *
  * \param[out]    data         Digest cache entry to modify
  * \param[in]     rsc          Resource that action was for
  * \param[in]     node         Node action was performed on
+ * \param[in]     params       Resource parameters evaluated for node
  * \param[in]     task         Name of action performed
  * \param[in,out] interval_ms  Action's interval (will be reset if in overrides)
  * \param[in]     xml_op       XML of operation in CIB status (if available)
  * \param[in]     op_version   CRM feature set to use for digest calculation
  * \param[in]     overrides    Key/value table to override resource parameters
  * \param[in]     data_set     Cluster working set
  */
 static void
 calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
-                      pe_node_t *node, const char *task, guint *interval_ms,
+                      pe_node_t *node, GHashTable *params,
+                      const char *task, guint *interval_ms,
                       xmlNode *xml_op, const char *op_version,
                       GHashTable *overrides, pe_working_set_t *data_set)
 {
     pe_action_t *action = NULL;
-    GHashTable *local_rsc_params = crm_str_table_new();
-
-    get_rsc_attributes(local_rsc_params, rsc, node, data_set);
 
     data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
 
     /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
      * that themselves are Pacemaker Remote nodes
      */
-    if (pe__add_bundle_remote_name(rsc, data->params_all,
+    if (pe__add_bundle_remote_name(rsc, data_set, data->params_all,
                                    XML_RSC_ATTR_REMOTE_RA_ADDR)) {
         crm_trace("Set address for bundle connection %s (on %s)",
                   rsc->id, node->details->uname);
     }
 
     // If interval was overridden, reset it
     if (overrides != NULL) {
         const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_"
                                                      XML_LRM_ATTR_INTERVAL);
 
         if (interval_s != NULL) {
             long long value_ll;
 
             errno = 0;
             value_ll = crm_parse_ll(interval_s, NULL);
             if ((errno == 0) && (value_ll >= 0) && (value_ll <= G_MAXUINT)) {
                 *interval_ms = (guint) value_ll;
             }
         }
     }
 
     action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms),
                            task, node, TRUE, FALSE, data_set);
     if (overrides != NULL) {
         g_hash_table_foreach(overrides, hash2field, data->params_all);
     }
-    g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
+    g_hash_table_foreach(params, hash2field, data->params_all);
     g_hash_table_foreach(action->extra, hash2field, data->params_all);
     g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
 
 #if ENABLE_VERSIONED_ATTRS
     append_all_versioned_params(rsc, node, action, xml_op, data, data_set);
 #endif
 
     pcmk__filter_op_for_digest(data->params_all);
 
-    g_hash_table_destroy(local_rsc_params);
     pe_free_action(action);
 
     data->digest_all_calc = calculate_operation_digest(data->params_all,
                                                        op_version);
 }
 
 // Return true if XML attribute name is a Pacemaker-defined fencing parameter
 static bool
 is_fence_param(xmlAttrPtr attr, void *user_data)
 {
     return pcmk_stonith_param((const char *) attr->name);
 }
 
 /*!
  * \internal
  * \brief Add secure digest to a digest cache entry
  *
  * \param[out] data        Digest cache entry to modify
  * \param[in]  rsc         Resource that action was for
+ * \param[in]  params      Resource parameters evaluated for node
  * \param[in]  xml_op      XML of operation in CIB status (if available)
  * \param[in]  op_version  CRM feature set to use for digest calculation
  * \param[in]  overrides   Key/value hash table to override resource parameters
  */
 static void
 calculate_secure_digest(op_digest_cache_t *data, pe_resource_t *rsc,
-                        xmlNode *xml_op, const char *op_version,
-                        GHashTable *overrides)
+                        GHashTable *params, xmlNode *xml_op,
+                        const char *op_version, GHashTable *overrides)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *secure_list = NULL;
 
     if (xml_op == NULL) {
         secure_list = " passwd password user ";
     } else {
         secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
     }
 
-    /* The controller doesn't create a digest of *all* non-sensitive
-     * parameters, only those listed in resource agent meta-data. The
-     * equivalent here is rsc->parameters.
-     */
     data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS);
     if (overrides != NULL) {
         g_hash_table_foreach(overrides, hash2field, data->params_secure);
     }
 
-    g_hash_table_foreach(rsc->parameters, hash2field, data->params_secure);
+    g_hash_table_foreach(params, hash2field, data->params_secure);
     if (secure_list != NULL) {
         pcmk__xe_remove_matching_attrs(data->params_secure, attr_not_in_string,
                                        (void *) secure_list);
     }
     if (pcmk_is_set(pcmk_get_ra_caps(class),
                     pcmk_ra_cap_fence_params)) {
         /* For stonith resources, Pacemaker adds special parameters,
          * but these are not listed in fence agent meta-data, so the
          * controller will not hash them. That means we have to filter
          * them out before calculating our hash for comparison.
          */
         pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param,
                                        NULL);
     }
     pcmk__filter_op_for_digest(data->params_secure);
 
     /* CRM_meta_timeout *should* be part of a digest for recurring operations.
      * However, currently the controller does not add timeout to secure digests,
      * because it only includes parameters declared by the resource agent.
      * Remove any timeout that made it this far, to match.
      *
      * @TODO Update the controller to add the timeout (which will require
      * bumping the feature set and checking that here).
      */
     xml_remove_prop(data->params_secure, CRM_META "_" XML_ATTR_TIMEOUT);
 
     data->digest_secure_calc = calculate_operation_digest(data->params_secure,
                                                           op_version);
 }
 
 /*!
  * \internal
  * \brief Add restart digest to a digest cache entry
  *
  * \param[out] data        Digest cache entry to modify
  * \param[in]  xml_op      XML of operation in CIB status (if available)
  * \param[in]  op_version  CRM feature set to use for digest calculation
  *
  * \note This function doesn't need to handle overrides because it starts with
  *       data->params_all, which already has overrides applied.
  */
 static void
 calculate_restart_digest(op_digest_cache_t *data, xmlNode *xml_op,
                          const char *op_version)
 {
     const char *value = NULL;
 
     // We must have XML of resource operation history
     if (xml_op == NULL) {
         return;
     }
 
     // And the history must have a restart digest to compare against
     if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) == NULL) {
         return;
     }
 
     // Start with a copy of all parameters
     data->params_restart = copy_xml(data->params_all);
 
     // Then filter out reloadable parameters, if any
     value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
     if (value != NULL) {
         pcmk__xe_remove_matching_attrs(data->params_restart, attr_in_string,
                                        (void *) value);
     }
 
     value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
     data->digest_restart_calc = calculate_operation_digest(data->params_restart,
                                                            value);
 }
 
 /*!
  * \internal
  * \brief Create a new digest cache entry with calculated digests
  *
  * \param[in]     rsc          Resource that action was for
  * \param[in]     task         Name of action performed
  * \param[in,out] interval_ms  Action's interval (will be reset if in overrides)
  * \param[in]     node         Node action was performed on
  * \param[in]     xml_op       XML of operation in CIB status (if available)
  * \param[in]     overrides    Key/value table to override resource parameters
  * \param[in]     calc_secure  Whether to calculate secure digest
  * \param[in]     data_set     Cluster working set
  *
  * \return Pointer to new digest cache entry (or NULL on memory error)
  * \note It is the caller's responsibility to free the result using
  *       pe__free_digests().
  */
 op_digest_cache_t *
 pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
                       pe_node_t *node, xmlNode *xml_op, GHashTable *overrides,
                       bool calc_secure, pe_working_set_t *data_set)
 {
     op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t));
     const char *op_version = CRM_FEATURE_SET;
+    GHashTable *params = NULL;
 
     if (data == NULL) {
         return NULL;
     }
     if (xml_op != NULL) {
         op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
     }
 
-    calculate_main_digest(data, rsc, node, task, interval_ms, xml_op,
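+    /* Use the resource's parameter table as evaluated for this node. The
+     * returned table is cached on the resource, so it must not be freed here.
+     */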
+    params = pe_rsc_params(rsc, node, data_set);
+    calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
                           op_version, overrides, data_set);
     if (calc_secure) {
-        calculate_secure_digest(data, rsc, xml_op, op_version, overrides);
+        calculate_secure_digest(data, rsc, params, xml_op, op_version,
+                                overrides);
     }
     calculate_restart_digest(data, xml_op, op_version);
     return data;
 }
 
 /*!
  * \internal
  * \brief Calculate action digests and store in node's digest cache
  *
  * \param[in] rsc          Resource that action was for
  * \param[in] task         Name of action performed
  * \param[in] interval_ms  Action's interval
  * \param[in] node         Node action was performed on
  * \param[in] xml_op       XML of operation in CIB status (if available)
  * \param[in] calc_secure  Whether to calculate secure digest
  * \param[in] data_set     Cluster working set
  *
  * \return Pointer to node's digest cache entry
  */
 static op_digest_cache_t *
 rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
                   pe_node_t *node, xmlNode *xml_op, bool calc_secure,
                   pe_working_set_t *data_set)
 {
     op_digest_cache_t *data = NULL;
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
 
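     // Reuse the node's cached entry for this operation key, if any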
     data = g_hash_table_lookup(node->details->digest_cache, key);
     if (data == NULL) {
         data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
                                      NULL, calc_secure, data_set);
         CRM_ASSERT(data != NULL);
         g_hash_table_insert(node->details->digest_cache, strdup(key), data);
     }
     free(key);
     return data;
 }
 
 /*!
  * \internal
  * \brief Calculate operation digests and compare against an XML history entry
  *
  * \param[in] rsc       Resource to check
  * \param[in] xml_op    Resource history XML
  * \param[in] node      Node to use for digest calculation
  * \param[in] data_set  Cluster working set
  *
  * \return Pointer to node's digest cache entry, with comparison result set
  */
 op_digest_cache_t *
 rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
                       pe_working_set_t * data_set)
 {
     op_digest_cache_t *data = NULL;
     guint interval_ms = 0;
 
     const char *op_version;
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *digest_all;
     const char *digest_restart;
 
     CRM_ASSERT(node != NULL);
 
     op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
     digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
     digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
                              pcmk_is_set(data_set->flags, pe_flag_sanitized),
                              data_set);
 
     data->rc = RSC_DIGEST_MATCH;
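     /* Check the restart digest first, since a mismatch there requires a full
      * restart, which takes precedence over a reload or reschedule.
      */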
     if (digest_restart && data->digest_restart_calc
         && (strcmp(data->digest_restart_calc, digest_restart) != 0)) {
         pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
                          "changed: hash was %s vs. now %s (restart:%s) %s",
                     interval_ms, task, rsc->id, node->details->uname,
                     crm_str(digest_restart), data->digest_restart_calc,
                     op_version,
                     crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         data->rc = RSC_DIGEST_RESTART;
 
     } else if (digest_all == NULL) {
         /* it is unknown what the previous op digest was */
         data->rc = RSC_DIGEST_UNKNOWN;
 
     } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
         pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
                          "changed: hash was %s vs. now %s (%s:%s) %s",
                     interval_ms, task, rsc->id, node->details->uname,
                     crm_str(digest_all), data->digest_all_calc,
                     (interval_ms > 0)? "reschedule" : "reload",
                     op_version,
                     crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         data->rc = RSC_DIGEST_ALL;
     }
     return data;
 }
 
 /*!
  * \internal
  * \brief Create an unfencing summary for use in special node attribute
  *
  * Create a string combining a fence device's resource ID, agent type, and
  * parameter digest (whether for all parameters or just non-private parameters).
  * This can be stored in a special node attribute, allowing us to detect changes
  * in either the agent type or parameters, to know whether unfencing must be
  * redone or can be safely skipped when the device's history is cleaned.
  *
  * \param[in] rsc_id        Fence device resource ID
  * \param[in] agent_type    Fence device agent
  * \param[in] param_digest  Fence device parameter digest
  *
  * \return Newly allocated string with unfencing digest
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 create_unfencing_summary(const char *rsc_id, const char *agent_type,
                          const char *param_digest)
 {
     return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
 }
 
 /*!
  * \internal
  * \brief Check whether a node can skip unfencing
  *
  * Check whether a fence device's current definition matches a node's
  * stored summary of when it was last unfenced by the device.
  *
  * \param[in] rsc_id        Fence device's resource ID
  * \param[in] agent         Fence device's agent type
  * \param[in] digest_calc   Fence device's current parameter digest
  * \param[in] node_summary  Value of node's special unfencing node attribute
  *                          (a comma-separated list of unfencing summaries for
  *                          all devices that have unfenced this node)
  *
  * \return TRUE if digest matches, FALSE otherwise
  */
 static bool
 unfencing_digest_matches(const char *rsc_id, const char *agent,
                          const char *digest_calc, const char *node_summary)
 {
     bool matches = FALSE;
 
     if (rsc_id && agent && digest_calc && node_summary) {
         char *search_secure = create_unfencing_summary(rsc_id, agent,
                                                        digest_calc);
 
         /* The digest was calculated including the device ID and agent,
          * so there is no risk of collision using strstr().
          */
         matches = (strstr(node_summary, search_secure) != NULL);
         crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
                   search_secure, matches? "" : "not ", node_summary);
         free(search_secure);
     }
     return matches;
 }
 
 /* Magic string to use as action name for digest cache entries used for
  * unfencing checks. This is not a real action name (the real unfencing
  * action is "on"), so check_action_definition() won't confuse these
  * entries with real actions.
  */
 #define STONITH_DIGEST_TASK "stonith-on"
 
 /*!
  * \internal
  * \brief Calculate fence device digests and digest comparison result
  *
  * \param[in] rsc       Fence device resource
  * \param[in] agent     Fence device's agent type
  * \param[in] node      Node with digest cache to use
  * \param[in] data_set  Cluster working set
  *
  * \return Node's digest cache entry
  */
 op_digest_cache_t *
 pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
                            pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *node_summary = NULL;
 
     // Calculate device's current parameter digests
     op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
                                                 node, NULL, TRUE, data_set);
 
     // Check whether node has special unfencing summary node attribute
     node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
     if (node_summary == NULL) {
         data->rc = RSC_DIGEST_UNKNOWN;
         return data;
     }
 
     // Check whether full parameter digest matches
     if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
                                  node_summary)) {
         data->rc = RSC_DIGEST_MATCH;
         return data;
     }
 
     // Check whether secure parameter digest matches
     node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
     if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
                                  node_summary)) {
         data->rc = RSC_DIGEST_MATCH;
         if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
             printf("Only 'private' parameters to %s for unfencing %s changed\n",
                    rsc->id, node->details->uname);
         }
         return data;
     }
 
     // Parameters don't match
     data->rc = RSC_DIGEST_ALL;
     if (pcmk_is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout))
         && data->digest_secure_calc) {
         char *digest = create_unfencing_summary(rsc->id, agent,
                                                 data->digest_secure_calc);
 
         printf("Parameters to %s for unfencing %s changed, try '%s'\n",
                rsc->id, node->details->uname, digest);
         free(digest);
     }
     return data;
 }
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index a15bb923d8..281bc88796 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,4008 +1,4008 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <string.h>
 #include <glib.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/util.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/common/iso8601_internal.h>
 #include <pe_status_private.h>
 
 CRM_TRACE_INIT_DATA(pe_status);
 
 /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
  * use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
  * flag is stringified more readably in log messages.
  */
 #define set_config_flag(data_set, option, flag) do {                        \
         const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
         if (scf_value != NULL) {                                            \
             if (crm_is_true(scf_value)) {                                   \
                 (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__,  \
                                     LOG_TRACE, "Working set",               \
                                     crm_system_name, (data_set)->flags,     \
                                     (flag), #flag);                         \
             } else {                                                        \
                 (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
                                     LOG_TRACE, "Working set",               \
                                     crm_system_name, (data_set)->flags,     \
                                     (flag), #flag);                         \
             }                                                               \
         }                                                                   \
     } while(0)
 
 static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           xmlNode **last_failure,
                           enum action_fail_response *failed,
                           pe_working_set_t *data_set);
 static void determine_remote_online_status(pe_working_set_t *data_set,
                                            pe_node_t *this_node);
 static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite,
                            pe_working_set_t *data_set);
 static void determine_online_status(xmlNode *node_state, pe_node_t *this_node,
                                     pe_working_set_t *data_set);
 
 static void unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_state,
                                  pe_working_set_t *data_set);
 
 
 // Bitmask for warnings we only want to print once
 uint32_t pe_wo = 0;
 
 static gboolean
 is_dangling_guest_node(pe_node_t *node)
 {
     /* We are looking for a remote node that was supposed to be mapped to a
      * container resource, but all traces of that container have disappeared
      * from both the configuration and the status section. */
     if (pe__is_guest_or_remote_node(node) &&
         node->details->remote_rsc &&
         node->details->remote_rsc->container == NULL &&
         pcmk_is_set(node->details->remote_rsc->flags,
                     pe_rsc_orphan_container_filler)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 
 /*!
  * \brief Schedule a fence action for a node
  *
  * \param[in,out] data_set        Current working set of cluster
  * \param[in,out] node            Node to fence
  * \param[in]     reason          Text description of why fencing is needed
  * \param[in]     priority_delay  Whether to consider `priority-fencing-delay`
  */
 void
 pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
               const char *reason, bool priority_delay)
 {
     CRM_CHECK(node, return);
 
     /* A guest node is fenced by marking its container as failed */
     if (pe__is_guest_node(node)) {
         pe_resource_t *rsc = node->details->remote_rsc->container;
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                 crm_notice("Not fencing guest node %s "
                            "(otherwise would because %s): "
                            "its guest resource %s is unmanaged",
                            node->details->uname, reason, rsc->id);
             } else {
                 crm_warn("Guest node %s will be fenced "
                          "(by recovering its guest resource %s): %s",
                          node->details->uname, rsc->id, reason);
 
                 /* We don't mark the node as unclean because that would prevent the
                  * node from running resources. We want to allow it to run resources
                  * in this transition if the recovery succeeds.
                  */
                 node->details->remote_requires_reset = TRUE;
                 pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             }
         }
 
     } else if (is_dangling_guest_node(node)) {
         crm_info("Cleaning up dangling connection for guest node %s: "
                  "fencing was already done because %s, "
                  "and guest resource no longer exists",
                  node->details->uname, reason);
         pe__set_resource_flags(node->details->remote_rsc,
                                pe_rsc_failed|pe_rsc_stop);
 
     } else if (pe__is_remote_node(node)) {
         pe_resource_t *rsc = node->details->remote_rsc;
 
         if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             crm_notice("Not fencing remote node %s "
                        "(otherwise would because %s): connection is unmanaged",
                        node->details->uname, reason);
         } else if (node->details->remote_requires_reset == FALSE) {
             node->details->remote_requires_reset = TRUE;
             crm_warn("Remote node %s %s: %s",
                      node->details->uname,
                      pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
                      reason);
         }
         node->details->unclean = TRUE;
         // No need to apply `priority-fencing-delay` for remote nodes
         pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
 
     } else if (node->details->unclean) {
         crm_trace("Cluster node %s %s because %s",
                   node->details->uname,
                   pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
                   reason);
 
     } else {
         crm_warn("Cluster node %s %s: %s",
                  node->details->uname,
                  pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
                  reason);
         node->details->unclean = TRUE;
         pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
     }
 }
 
 // @TODO xpaths can't handle templates, rules, or id-refs
 
 // nvpair with provides or requires set to unfencing
 #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR                \
     "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'"    \
     "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \
     "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']"
 
 // unfencing in rsc_defaults or any resource
 #define XPATH_ENABLE_UNFENCING \
     "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES   \
     "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR                                               \
     "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG  \
     "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
 
 static void
 set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
 {
     xmlXPathObjectPtr result = NULL;
 
     if (!pcmk_is_set(data_set->flags, flag)) {
         result = xpath_search(data_set->input, xpath);
         if (result && (numXpathResults(result) > 0)) {
             pe__set_working_set_flags(data_set, flag);
         }
         freeXpathObject(result);
     }
 }
 
 gboolean
 unpack_config(xmlNode * config, pe_working_set_t * data_set)
 {
     const char *value = NULL;
     GHashTable *config_hash = crm_str_table_new();
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     data_set->config_hash = config_hash;
 
     pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
                                CIB_OPTIONS_FIRST, FALSE, data_set);
 
     verify_pe_options(data_set->config_hash);
 
     set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
     if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_info("Startup probes: disabled (dangerous)");
     }
 
     value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
     if (value && crm_is_true(value)) {
         crm_info("Watchdog-based self-fencing will be performed via SBD if "
                  "fencing is required and stonith-watchdog-timeout is nonzero");
         pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
     }
 
     /* Set certain flags via xpath here, so they can be used before the relevant
      * configuration sections are unpacked.
      */
     set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
     crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
 
     set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
     crm_debug("STONITH of failed nodes is %s",
               pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
 
     data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
     if (!strcmp(data_set->stonith_action, "poweroff")) {
         pe_warn_once(pe_wo_poweroff,
                      "Support for stonith-action of 'poweroff' is deprecated "
                      "and will be removed in a future release (use 'off' instead)");
         data_set->stonith_action = "off";
     }
     crm_trace("STONITH will %s nodes", data_set->stonith_action);
 
     set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
     crm_debug("Concurrent fencing is %s",
               pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
 
     value = pe_pref(data_set->config_hash,
                     XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
     if (value) {
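         // crm_parse_interval_spec() returns milliseconds; this stores seconds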
         data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
         crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
     }
 
     set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
     crm_debug("Stop all active resources: %s",
               pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
 
     set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
     if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
         crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
     }
 
     value = pe_pref(data_set->config_hash, "no-quorum-policy");
 
     if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
         data_set->no_quorum_policy = no_quorum_ignore;
 
     } else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
         data_set->no_quorum_policy = no_quorum_freeze;
 
     } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
         data_set->no_quorum_policy = no_quorum_demote;
 
     } else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
         if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             int do_panic = 0;
 
             crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
                                   &do_panic);
             if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
                 data_set->no_quorum_policy = no_quorum_suicide;
             } else {
                 crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
                 data_set->no_quorum_policy = no_quorum_stop;
             }
         } else {
             pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
                              "fencing is disabled");
             data_set->no_quorum_policy = no_quorum_stop;
         }
 
     } else {
         data_set->no_quorum_policy = no_quorum_stop;
     }
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             crm_debug("On loss of quorum: Freeze resources");
             break;
         case no_quorum_stop:
             crm_debug("On loss of quorum: Stop ALL resources");
             break;
         case no_quorum_demote:
             crm_debug("On loss of quorum: "
                       "Demote promotable resources and stop other resources");
             break;
         case no_quorum_suicide:
             crm_notice("On loss of quorum: Fence all remaining nodes");
             break;
         case no_quorum_ignore:
             crm_notice("On loss of quorum: Ignore");
             break;
     }
 
     set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
     crm_trace("Orphan resources are %s",
               pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
 
     set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
     crm_trace("Orphan resource actions are %s",
               pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
 
     set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop);
     crm_trace("Stopped resources are removed from the status section: %s",
               pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)));
 
     set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
     crm_trace("Maintenance mode: %s",
               pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
 
     set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
     crm_trace("Start failures are %s",
               pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
 
     if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
         set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
     }
     if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
         crm_trace("Unseen nodes will be fenced");
     } else {
         pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
     }
 
     pcmk__score_red = char2score(pe_pref(data_set->config_hash, "node-health-red"));
     pcmk__score_green = char2score(pe_pref(data_set->config_hash, "node-health-green"));
     pcmk__score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow"));
 
     crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s",
              pe_pref(data_set->config_hash, "node-health-red"),
              pe_pref(data_set->config_hash, "node-health-yellow"),
              pe_pref(data_set->config_hash, "node-health-green"));
 
     data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
     crm_trace("Placement strategy: %s", data_set->placement_strategy);
 
     set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
     crm_trace("Resources will%s be locked to cleanly shut down nodes",
               (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         value = pe_pref(data_set->config_hash,
                         XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
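         // Convert milliseconds from crm_parse_interval_spec() to seconds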
         data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
         crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
     }
 
     return TRUE;
 }
 
 pe_node_t *
 pe_create_node(const char *id, const char *uname, const char *type,
                const char *score, pe_working_set_t * data_set)
 {
     pe_node_t *new_node = NULL;
 
     if (pe_find_node(data_set->nodes, uname) != NULL) {
         pcmk__config_warn("More than one node entry has name '%s'", uname);
     }
 
     new_node = calloc(1, sizeof(pe_node_t));
     if (new_node == NULL) {
         return NULL;
     }
 
     new_node->weight = char2score(score);
     new_node->fixed = FALSE;
     new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
 
     if (new_node->details == NULL) {
         free(new_node);
         return NULL;
     }
 
     crm_trace("Creating node for entry %s/%s", uname, id);
     new_node->details->id = id;
     new_node->details->uname = uname;
     new_node->details->online = FALSE;
     new_node->details->shutdown = FALSE;
     new_node->details->rsc_discovery_enabled = TRUE;
     new_node->details->running_rsc = NULL;
     new_node->details->type = node_ping;
 
     if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
         new_node->details->type = node_remote;
         pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
     } else if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
         new_node->details->type = node_member;
     }
 
     new_node->details->attrs = crm_str_table_new();
 
     if (pe__is_guest_or_remote_node(new_node)) {
         g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
                             strdup("remote"));
     } else {
         g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
                             strdup("cluster"));
     }
 
     new_node->details->utilization = crm_str_table_new();
 
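     // Cache of operation digests for this node, keyed by operation key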
     new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash,
                                                             g_str_equal, free,
                                                             pe__free_digests);
 
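     // Keep the node list sorted by node name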
     data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname);
     return new_node;
 }
 
 static const char *
 expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
 {
     xmlNode *attr_set = NULL;
     xmlNode *attr = NULL;
 
     const char *container_id = ID(xml_obj);
     const char *remote_name = NULL;
     const char *remote_server = NULL;
     const char *remote_port = NULL;
     const char *connect_timeout = "60s";
     const char *remote_allow_migrate = NULL;
     const char *is_managed = NULL;
 
     for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
          attr_set = pcmk__xe_next(attr_set)) {
 
         if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS,
                           pcmk__str_casei)) {
             continue;
         }
 
         for (attr = pcmk__xe_first_child(attr_set); attr != NULL;
              attr = pcmk__xe_next(attr)) {
             const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
             const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
 
             if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) {
                 remote_name = value;
             } else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) {
                 remote_server = value;
             } else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) {
                 remote_port = value;
             } else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) {
                 connect_timeout = value;
             } else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) {
                 remote_allow_migrate = value;
             } else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) {
                 is_managed = value;
             }
         }
     }
 
     if (remote_name == NULL) {
         return NULL;
     }
 
     if (pe_find_resource(data->resources, remote_name) != NULL) {
         return NULL;
     }
 
     pe_create_remote_xml(parent, remote_name, container_id,
                          remote_allow_migrate, is_managed,
                          connect_timeout, remote_server, remote_port);
     return remote_name;
 }
 
 static void
 handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
 {
     if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
         /* Ignore fencing for remote nodes that don't have a connection resource
          * associated with them. This happens when remote node entries get left
          * in the nodes section after the connection resource is removed.
          */
         return;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
         // All nodes are unclean until we've seen their status entry
         new_node->details->unclean = TRUE;
 
     } else {
         // Blind faith ...
         new_node->details->unclean = FALSE;
     }
 
     /* We need to be able to determine whether a node's status section
      * exists, separately from whether the node is unclean. */
     new_node->details->unseen = TRUE;
 }
 
 gboolean
 unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
     pe_node_t *new_node = NULL;
     const char *id = NULL;
     const char *uname = NULL;
     const char *type = NULL;
     const char *score = NULL;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
 
         if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) {
             new_node = NULL;
 
             id = crm_element_value(xml_obj, XML_ATTR_ID);
             uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
             type = crm_element_value(xml_obj, XML_ATTR_TYPE);
             score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
             crm_trace("Processing node %s/%s", uname, id);
 
             if (id == NULL) {
                 pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE
                                  "> entry in configuration without id");
                 continue;
             }
             new_node = pe_create_node(id, uname, type, score, data_set);
 
             if (new_node == NULL) {
                 return FALSE;
             }
 
 /* 		if(data_set->have_quorum == FALSE */
 /* 		   && data_set->no_quorum_policy == no_quorum_stop) { */
 /* 			/\* start shutting resources down *\/ */
 /* 			new_node->weight = -INFINITY; */
 /* 		} */
 
             handle_startup_fencing(data_set, new_node);
 
             add_node_attrs(xml_obj, new_node, FALSE, data_set);
             pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
                                        new_node->details->utilization, NULL,
                                        FALSE, data_set);
 
             crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
         }
     }
 
     if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
         crm_info("Creating a fake local node");
         pe_create_node(data_set->localhost, data_set->localhost, NULL, NULL,
                        data_set);
     }
 
     return TRUE;
 }
 
 static void
 setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     const char *container_id = NULL;
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             setup_container(child_rsc, data_set);
         }
         return;
     }
 
     container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
     if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
         pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
 
         if (container) {
             rsc->container = container;
             pe__set_resource_flags(container, pe_rsc_is_container);
             container->fillers = g_list_append(container->fillers, rsc);
             pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
         } else {
             pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id);
         }
     }
 }
 
 gboolean
 unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
 
     /* Create remote nodes and guest nodes from the resource configuration
      * before unpacking resources.
      */
     for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
 
         const char *new_node_id = NULL;
 
         /* Check for remote nodes, which are defined by ocf:pacemaker:remote
          * primitives.
          */
         if (xml_contains_remote_node(xml_obj)) {
             new_node_id = ID(xml_obj);
             /* The "pe_find_node" check is here to make sure we don't iterate over
              * an expanded node that has already been added to the node list. */
             if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                 crm_trace("Found remote node %s defined by resource %s",
                           new_node_id, ID(xml_obj));
                 pe_create_node(new_node_id, new_node_id, "remote", NULL,
                                data_set);
             }
             continue;
         }
 
         /* Check for guest nodes, which are defined by special meta-attributes
          * of a primitive of any type (for example, VirtualDomain or Xen).
          */
         if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
             /* This will add an ocf:pacemaker:remote primitive to the
              * configuration for the guest node's connection, to be unpacked
              * later.
              */
             new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
             if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                 crm_trace("Found guest node %s in resource %s",
                           new_node_id, ID(xml_obj));
                 pe_create_node(new_node_id, new_node_id, "remote", NULL,
                                data_set);
             }
             continue;
         }
 
         /* Check for guest nodes inside a group. Clones are currently not
          * supported as guest nodes.
          */
         if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) {
             xmlNode *xml_obj2 = NULL;
             for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
                  xml_obj2 = pcmk__xe_next(xml_obj2)) {
 
                 new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
 
                 if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                     crm_trace("Found guest node %s in resource %s inside group %s",
                               new_node_id, ID(xml_obj2), ID(xml_obj));
                     pe_create_node(new_node_id, new_node_id, "remote", NULL,
                                    data_set);
                 }
             }
         }
     }
     return TRUE;
 }
 
 /* Call this after all the nodes and resources have been
  * unpacked, but before the status section is read.
  *
  * A remote node's online status is reflected by the state
  * of the remote node's connection resource. We need to link
  * the remote node to its connection resource so the scheduler can
  * access the connection resource easily during its calculations.
  */
 static void
 link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
 {
     pe_node_t *remote_node = NULL;
 
     if (new_rsc->is_remote_node == FALSE) {
         return;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
         /* remote_nodes and remote_resources are not linked in quick location calculations */
         return;
     }
 
     remote_node = pe_find_node(data_set->nodes, new_rsc->id);
     CRM_CHECK(remote_node != NULL, return;);
 
     pe_rsc_trace(new_rsc, "Linking remote connection resource %s to node %s",
                  new_rsc->id, remote_node->details->uname);
     remote_node->details->remote_rsc = new_rsc;
 
     if (new_rsc->container == NULL) {
         /* Handle start-up fencing for remote nodes (as opposed to guest nodes)
          * the same as is done for cluster nodes.
          */
         handle_startup_fencing(data_set, remote_node);
 
     } else {
         /* pe_create_node() marks the new node as "remote" or "cluster"; now
          * that we know the node is a guest node, update it correctly.
          */
         g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND),
                              strdup("container"));
     }
 }
 
 static void
 destroy_tag(gpointer data)
 {
     pe_tag_t *tag = data;
 
     if (tag) {
         free(tag->id);
         g_list_free_full(tag->refs, free);
         free(tag);
     }
 }
 
 /*!
  * \internal
  * \brief Parse configuration XML for resource information
  *
  * \param[in]     xml_resources  Top of resource configuration XML
  * \param[in,out] data_set       Where to put resource information
  *
  * \return TRUE
  *
  * \note unpack_remote_nodes() MUST be called before this, so that the nodes can
  *       be used when common_unpack() calls resource_location()
  */
 gboolean
 unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
     GListPtr gIter = NULL;
 
     data_set->template_rsc_sets = g_hash_table_new_full(crm_str_hash,
                                                         g_str_equal, free,
                                                         destroy_tag);
 
     for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
 
         pe_resource_t *new_rsc = NULL;
 
         if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, pcmk__str_none)) {
             const char *template_id = ID(xml_obj);
 
             if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets,
                                                             template_id, NULL, NULL) == FALSE) {
                 /* Record the template's ID so that we at least know it exists. */
                 g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL);
             }
             continue;
         }
 
         crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj));
         if (common_unpack(xml_obj, &new_rsc, NULL, data_set) && (new_rsc != NULL)) {
             data_set->resources = g_list_append(data_set->resources, new_rsc);
             pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
 
         } else {
             pcmk__config_err("Ignoring <%s> resource '%s' "
                              "because configuration is invalid",
                              crm_element_name(xml_obj), crm_str(ID(xml_obj)));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
         }
     }
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         setup_container(rsc, data_set);
         link_rsc2remotenode(data_set, rsc);
     }
 
     data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);
     if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
         /* Ignore */
 
     } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
                && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
 
         pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
         pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
         pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
     }
 
     return TRUE;
 }
 
 gboolean
 unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
 {
     xmlNode *xml_tag = NULL;
 
     data_set->tags = g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                            destroy_tag);
 
     for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
          xml_tag = pcmk__xe_next(xml_tag)) {
 
         xmlNode *xml_obj_ref = NULL;
         const char *tag_id = ID(xml_tag);
 
         if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) {
             continue;
         }
 
         if (tag_id == NULL) {
             pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
                              crm_element_name(xml_tag));
             continue;
         }
 
         for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL;
              xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
 
             const char *obj_ref = ID(xml_obj_ref);
 
             if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) {
                 continue;
             }
 
             if (obj_ref == NULL) {
                 pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
                                  crm_element_name(xml_obj_ref), tag_id);
                 continue;
             }
 
             if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
 /* The ticket state section:
  * "/cib/status/tickets/ticket_state" */
 static gboolean
 unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
 {
     const char *ticket_id = NULL;
     const char *granted = NULL;
     const char *last_granted = NULL;
     const char *standby = NULL;
     xmlAttrPtr xIter = NULL;
 
     pe_ticket_t *ticket = NULL;
 
     ticket_id = ID(xml_ticket);
     if (pcmk__str_empty(ticket_id)) {
         return FALSE;
     }
 
     crm_trace("Processing ticket state for %s", ticket_id);
 
     ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
     if (ticket == NULL) {
         ticket = ticket_new(ticket_id, data_set);
         if (ticket == NULL) {
             return FALSE;
         }
     }
 
     for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
         const char *prop_name = (const char *)xIter->name;
         const char *prop_value = crm_element_value(xml_ticket, prop_name);
 
         if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
             continue;
         }
         g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
     }
 
     granted = g_hash_table_lookup(ticket->state, "granted");
     if (granted && crm_is_true(granted)) {
         ticket->granted = TRUE;
         crm_info("We have ticket '%s'", ticket->id);
     } else {
         ticket->granted = FALSE;
         crm_info("We do not have ticket '%s'", ticket->id);
     }
 
     last_granted = g_hash_table_lookup(ticket->state, "last-granted");
     if (last_granted) {
         ticket->last_granted = crm_parse_int(last_granted, 0);
     }
 
     standby = g_hash_table_lookup(ticket->state, "standby");
     if (standby && crm_is_true(standby)) {
         ticket->standby = TRUE;
         if (ticket->granted) {
             crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
         }
     } else {
         ticket->standby = FALSE;
     }
 
     crm_trace("Done with ticket state for %s", ticket_id);
 
     return TRUE;
 }
 
 static gboolean
 unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
 
     for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
 
         if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
             continue;
         }
         unpack_ticket_state(xml_obj, data_set);
     }
 
     return TRUE;
 }
 
 static void
 unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_t * data_set) 
 {
     const char *resource_discovery_enabled = NULL;
     xmlNode *attrs = NULL;
     pe_resource_t *rsc = NULL;
 
     if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
         return;
     }
 
     if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) {
         return;
     }
     crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname);
 
     this_node->details->remote_maintenance =
         crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0");
 
     rsc = this_node->details->remote_rsc;
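     // Unless the node needs fencing, consider it provisionally clean and seen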
     if (this_node->details->remote_requires_reset == FALSE) {
         this_node->details->unclean = FALSE;
         this_node->details->unseen = FALSE;
     }
     attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
     add_node_attrs(attrs, this_node, TRUE, data_set);
 
     if (pe__shutdown_requested(this_node)) {
         crm_info("Node %s is shutting down", this_node->details->uname);
         this_node->details->shutdown = TRUE;
         if (rsc) {
             rsc->next_role = RSC_ROLE_STOPPED;
         }
     }
  
     if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
         crm_info("Node %s is in standby-mode", this_node->details->uname);
         this_node->details->standby = TRUE;
     }
 
     if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
         ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
         crm_info("Node %s is in maintenance-mode", this_node->details->uname);
         this_node->details->maintenance = TRUE;
     }
 
     resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
     if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
         if (pe__is_remote_node(this_node)
             && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             crm_warn("Ignoring %s attribute on remote node %s because stonith is disabled",
                      XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
         } else {
             /* This is either a remote node with fencing enabled, or a guest
              * node. We don't care whether fencing is enabled when fencing guest
              * nodes, because they are "fenced" by recovering their containing
              * resource.
              */
             crm_info("Node %s has resource discovery disabled", this_node->details->uname);
             this_node->details->rsc_discovery_enabled = FALSE;
         }
     }
 }
 
 static bool
 unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) 
 {
     bool changed = false;
     xmlNode *lrm_rsc = NULL;
 
     for (xmlNode *state = pcmk__xe_first_child(status); state != NULL;
          state = pcmk__xe_next(state)) {
 
         const char *id = NULL;
         const char *uname = NULL;
         pe_node_t *this_node = NULL;
         bool process = FALSE;
 
         if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
             continue;
         }
 
         id = crm_element_value(state, XML_ATTR_ID);
         uname = crm_element_value(state, XML_ATTR_UNAME);
         this_node = pe_find_node_any(data_set->nodes, id, uname);
 
         if (this_node == NULL) {
             crm_info("Node %s is unknown", id);
             continue;
 
         } else if (this_node->details->unpacked) {
             crm_trace("Node %s was already processed", id);
             continue;
 
         } else if (!pe__is_guest_or_remote_node(this_node)
                    && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             // A redundant test, but preserves the order for regression tests
             process = TRUE;
 
         } else if (pe__is_guest_or_remote_node(this_node)) {
             bool check = FALSE;
             pe_resource_t *rsc = this_node->details->remote_rsc;
 
             if (fence) {
                 check = TRUE;
 
             } else if (rsc == NULL) {
                 /* Not ready yet */
 
             } else if (pe__is_guest_node(this_node)
                        && rsc->role == RSC_ROLE_STARTED
                        && rsc->container->role == RSC_ROLE_STARTED) {
                 /* Both the connection and its containing resource need to be
                  * known to be up before we process resources running in it.
                  */
                 check = TRUE;
                 crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED);
 
             } else if (!pe__is_guest_node(this_node)
                        && ((rsc->role == RSC_ROLE_STARTED)
                            || pcmk_is_set(data_set->flags, pe_flag_shutdown_lock))) {
                 check = TRUE;
                 crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED);
             }
 
             if (check) {
                 determine_remote_online_status(data_set, this_node);
                 unpack_handle_remote_attrs(this_node, state, data_set);
                 process = TRUE;
             }
 
         } else if (this_node->details->online) {
             process = TRUE;
 
         } else if (fence) {
             process = TRUE;
 
         } else if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
             process = TRUE;
         }
 
         if (process) {
             crm_trace("Processing lrm resource entries on %shealthy%s node: %s",
                       fence?"un":"",
                       (pe__is_guest_or_remote_node(this_node)? " remote" : ""),
                       this_node->details->uname);
             changed = TRUE;
             this_node->details->unpacked = TRUE;
 
             lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
             lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
             unpack_lrm_resources(this_node, lrm_rsc, data_set);
         }
     }
     return changed;
 }
 
 /* Remove nodes that are down or stopping, and create positive rsc_to_node
  * constraints between resources and the nodes they are running on.
  * Anything else?
  */
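 /* As a rough illustration, each node_state entry unpacked below looks
  * something like the following (attribute names are taken from the
  * XML_NODE_* constants used in the code, so treat the exact spellings
  * here as an approximation):
  *
  *   <node_state id="1" uname="node1" in_ccm="true" crmd="online"
  *               join="member" expected="member">
  *     <transient_attributes .../>
  *     <lrm><lrm_resources>...</lrm_resources></lrm>
  *   </node_state>
  */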
 gboolean
 unpack_status(xmlNode * status, pe_working_set_t * data_set)
 {
     const char *id = NULL;
     const char *uname = NULL;
 
     xmlNode *state = NULL;
     pe_node_t *this_node = NULL;
 
     crm_trace("Beginning unpack");
 
     if (data_set->tickets == NULL) {
         data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                   free, destroy_ticket);
     }
 
     for (state = pcmk__xe_first_child(status); state != NULL;
          state = pcmk__xe_next(state)) {
 
         if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
             unpack_tickets_state((xmlNode *) state, data_set);
 
         } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
             xmlNode *attrs = NULL;
             const char *resource_discovery_enabled = NULL;
 
             id = crm_element_value(state, XML_ATTR_ID);
             uname = crm_element_value(state, XML_ATTR_UNAME);
             this_node = pe_find_node_any(data_set->nodes, id, uname);
 
             if (uname == NULL) {
                 /* error */
                 continue;
 
             } else if (this_node == NULL) {
                 pcmk__config_warn("Ignoring recorded node status for '%s' "
                                   "because no longer in configuration", uname);
                 continue;
 
             } else if (pe__is_guest_or_remote_node(this_node)) {
                 /* The online state of remote nodes is determined by the state
                  * of their connection resource after all resource history has
                  * been unpacked. However, we do need to mark here whether the
                  * node has been fenced, as this plays a role when unpacking
                  * cluster node resource state.
                  */
                 this_node->details->remote_was_fenced =
                     crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0");
                 continue;
             }
 
             crm_trace("Processing node id=%s, uname=%s", id, uname);
 
             /* Mark the node as provisionally clean
              * - at least we have seen it in the current cluster's lifetime
              */
             this_node->details->unclean = FALSE;
             this_node->details->unseen = FALSE;
             attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
             add_node_attrs(attrs, this_node, TRUE, data_set);
 
             if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
                 crm_info("Node %s is in standby-mode", this_node->details->uname);
                 this_node->details->standby = TRUE;
             }
 
             if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) {
                 crm_info("Node %s is in maintenance-mode", this_node->details->uname);
                 this_node->details->maintenance = TRUE;
             }
 
             resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
             if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
                 crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes",
                     XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
             }
 
             crm_trace("determining node state");
             determine_online_status(state, this_node, data_set);
 
             if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
                 && this_node->details->online
                 && (data_set->no_quorum_policy == no_quorum_suicide)) {
                 /* Everything else should flow from this automatically
                  * (at least until the scheduler becomes able to migrate off
                  * healthy resources)
                  */
                 pe_fence_node(data_set, this_node, "cluster does not have quorum", FALSE);
             }
         }
     }

     while (unpack_node_loop(status, FALSE, data_set)) {
         crm_trace("Start another loop");
     }
 
     // Now catch any nodes we didn't see
     unpack_node_loop(status,
                      pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
                      data_set);
 
     /* Now that we know where resources are, we can schedule stops of containers
      * with failed bundle connections
      */
     if (data_set->stop_needed != NULL) {
         for (GList *item = data_set->stop_needed; item; item = item->next) {
             pe_resource_t *container = item->data;
             pe_node_t *node = pe__current_node(container);
 
             if (node) {
                 stop_action(container, node, FALSE);
             }
         }
         g_list_free(data_set->stop_needed);
         data_set->stop_needed = NULL;
     }
 
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *this_node = gIter->data;
 
         if (this_node == NULL) {
             continue;
         } else if (!pe__is_guest_or_remote_node(this_node)) {
             continue;
         } else if (this_node->details->unpacked) {
             continue;
         }
         determine_remote_online_status(data_set, this_node);
     }
 
     return TRUE;
 }
 
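 /*!
  * \internal
  * \brief Determine a cluster node's online status when fencing is disabled
  *
  * Without fencing, a node is considered online only when both its cluster
  * membership and its controller join state confirm it; a node that is
  * unexpectedly down can only be marked unclean.
  *
  * \param[in] data_set    Cluster working set
  * \param[in] node_state  CIB node_state entry for the node
  * \param[in] this_node   Node being checked
  *
  * \return TRUE if the node is online, otherwise FALSE
  */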
 static gboolean
 determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                    pe_node_t * this_node)
 {
     gboolean online = FALSE;
     const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
     const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
     const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
 
     if (!crm_is_true(in_cluster)) {
         crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster));
 
     } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
         if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
             online = TRUE;
         } else {
             crm_debug("Node is not ready to run resources: %s", join);
         }
 
     } else if (this_node->details->expected_up == FALSE) {
         crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster));
         crm_trace("\tis_peer=%s, join=%s, expected=%s",
                   crm_str(is_peer), crm_str(join), crm_str(exp_state));
 
     } else {
         /* mark it unclean */
         pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
         crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s",
                  crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state));
     }
     return online;
 }
 
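 /*!
  * \internal
  * \brief Determine a cluster node's online status when fencing is enabled
  *
  * With fencing available, membership or join states that conflict with
  * expectations cause the node to be scheduled for fencing rather than
  * merely treated as offline.
  *
  * \param[in] data_set    Cluster working set
  * \param[in] node_state  CIB node_state entry for the node
  * \param[in] this_node   Node being checked
  *
  * \return TRUE if the node is online, otherwise FALSE
  */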
 static gboolean
 determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                 pe_node_t * this_node)
 {
     gboolean online = FALSE;
     gboolean do_terminate = FALSE;
     bool crmd_online = FALSE;
     const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
     const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
     const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
     const char *terminate = pe_node_attribute_raw(this_node, "terminate");
 
 /*
   - XML_NODE_IN_CLUSTER    ::= true|false
   - XML_NODE_IS_PEER       ::= online|offline
   - XML_NODE_JOIN_STATE    ::= member|down|pending|banned
   - XML_NODE_EXPECTED      ::= member|down
 */
 
     if (crm_is_true(terminate)) {
         do_terminate = TRUE;
 
     } else if (terminate != NULL && strlen(terminate) > 0) {
         /* could be a time() value */
         char t = terminate[0];
 
         if (t != '0' && isdigit(t)) {
             do_terminate = TRUE;
         }
     }
 
     crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d",
               this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
               crm_str(join), crm_str(exp_state), do_terminate);
 
     online = crm_is_true(in_cluster);
     crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
     if (exp_state == NULL) {
         exp_state = CRMD_JOINSTATE_DOWN;
     }
 
     if (this_node->details->shutdown) {
         crm_debug("%s is shutting down", this_node->details->uname);
 
         /* Slightly different criteria since we can't shut down a dead peer */
         online = crmd_online;
 
     } else if (in_cluster == NULL) {
         pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
 
     } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
         pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria", FALSE);
 
     } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
 
         if (crm_is_true(in_cluster) || crmd_online) {
             crm_info("- Node %s is not ready to run resources", this_node->details->uname);
             this_node->details->standby = TRUE;
             this_node->details->pending = TRUE;
 
         } else {
             crm_trace("%s is down or still coming up", this_node->details->uname);
         }
 
     } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
                && crm_is_true(in_cluster) == FALSE && !crmd_online) {
         crm_info("Node %s was just shot", this_node->details->uname);
         online = FALSE;
 
     } else if (crm_is_true(in_cluster) == FALSE) {
         // Consider `priority-fencing-delay` for lost nodes
         pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
 
     } else if (!crmd_online) {
         pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
 
         /* Everything is running at this point, now check join state */
     } else if (do_terminate) {
         pe_fence_node(data_set, this_node, "termination was requested", FALSE);
 
     } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
         crm_info("Node %s is active", this_node->details->uname);
 
     } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
         crm_info("Node %s is not ready to run resources", this_node->details->uname);
         this_node->details->standby = TRUE;
         this_node->details->pending = TRUE;
 
     } else {
         pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
         crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d",
                  this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
                  crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown);
     }
 
     return online;
 }
 
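 /*!
  * \internal
  * \brief Determine a remote or guest node's online status
  *
  * A Pacemaker Remote node's status is derived from the state of its
  * connection resource (and, for guest nodes, its container resource)
  * rather than from cluster membership.
  *
  * \param[in] data_set   Cluster working set
  * \param[in] this_node  Remote or guest node to check
  */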
 static void
 determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
 {
     pe_resource_t *rsc = this_node->details->remote_rsc;
     pe_resource_t *container = NULL;
     pe_node_t *host = NULL;
 
     /* If there is a node state entry for a (former) Pacemaker Remote node
      * but no resource creating that node, the node's connection resource will
      * be NULL. Consider it an offline remote node in that case.
      */
     if (rsc == NULL) {
         this_node->details->online = FALSE;
         goto remote_online_done;
     }
 
     container = rsc->container;
 
     if (container && pcmk__list_of_1(rsc->running_on)) {
         host = rsc->running_on->data;
     }
 
     /* If the resource is currently started, mark it online. */
     if (rsc->role == RSC_ROLE_STARTED) {
         crm_trace("%s node %s presumed ONLINE because connection resource is started",
                   (container? "Guest" : "Remote"), this_node->details->id);
         this_node->details->online = TRUE;
     }
 
     /* consider this node shutting down if transitioning start->stop */
     if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
         crm_trace("%s node %s shutting down because connection resource is stopping",
                   (container? "Guest" : "Remote"), this_node->details->id);
         this_node->details->shutdown = TRUE;
     }
 
     /* Now check all the failure conditions. */
     if (container && pcmk_is_set(container->flags, pe_rsc_failed)) {
         crm_trace("Guest node %s UNCLEAN because guest resource failed",
                   this_node->details->id);
         this_node->details->online = FALSE;
         this_node->details->remote_requires_reset = TRUE;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         crm_trace("%s node %s OFFLINE because connection resource failed",
                   (container? "Guest" : "Remote"), this_node->details->id);
         this_node->details->online = FALSE;
 
     } else if (rsc->role == RSC_ROLE_STOPPED
                || (container && container->role == RSC_ROLE_STOPPED)) {
 
         crm_trace("%s node %s OFFLINE because its resource is stopped",
                   (container? "Guest" : "Remote"), this_node->details->id);
         this_node->details->online = FALSE;
         this_node->details->remote_requires_reset = FALSE;
 
     } else if (host && (host->details->online == FALSE)
                && host->details->unclean) {
         crm_trace("Guest node %s UNCLEAN because host is unclean",
                   this_node->details->id);
         this_node->details->online = FALSE;
         this_node->details->remote_requires_reset = TRUE;
     }
 
 remote_online_done:
     crm_trace("Remote node %s online=%s",
         this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
 }
 
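 /*!
  * \internal
  * \brief Unpack a cluster node's state and mark it online or offline
  *
  * \param[in] node_state  CIB node_state entry for the node
  * \param[in] this_node   Node being checked
  * \param[in] data_set    Cluster working set
  */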
 static void
 determine_online_status(xmlNode * node_state, pe_node_t * this_node, pe_working_set_t * data_set)
 {
     gboolean online = FALSE;
     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
 
     CRM_CHECK(this_node != NULL, return);
 
     this_node->details->shutdown = FALSE;
     this_node->details->expected_up = FALSE;
 
     if (pe__shutdown_requested(this_node)) {
         this_node->details->shutdown = TRUE;
 
     } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
         this_node->details->expected_up = TRUE;
     }
 
     if (this_node->details->type == node_ping) {
         this_node->details->unclean = FALSE;
         online = FALSE;         /* As far as resource management is concerned,
                                  * the node is safely offline.
                                  * Anyone caught abusing this logic will be shot
                                  */
 
     } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
         online = determine_online_status_no_fencing(data_set, node_state, this_node);
 
     } else {
         online = determine_online_status_fencing(data_set, node_state, this_node);
     }
 
     if (online) {
         this_node->details->online = TRUE;
 
     } else {
         /* remove node from contention */
         this_node->fixed = TRUE;
         this_node->weight = -INFINITY;
     }
 
     if (online && this_node->details->shutdown) {
         /* don't run resources here */
         this_node->fixed = TRUE;
         this_node->weight = -INFINITY;
     }
 
     if (this_node->details->type == node_ping) {
         crm_info("Node %s is not a pacemaker node", this_node->details->uname);
 
     } else if (this_node->details->unclean) {
         pe_proc_warn("Node %s is unclean", this_node->details->uname);
 
     } else if (this_node->details->online) {
         crm_info("Node %s is %s", this_node->details->uname,
                  this_node->details->shutdown ? "shutting down" :
                  this_node->details->pending ? "pending" :
                  this_node->details->standby ? "standby" :
                  this_node->details->maintenance ? "maintenance" : "online");
 
     } else {
         crm_trace("Node %s is offline", this_node->details->uname);
     }
 }
 
 /*!
  * \internal
  * \brief Find the end of a resource's name, excluding any clone suffix
  *
  * \param[in] id  Resource ID to check
  *
  * \return Pointer to last character of resource's base name
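  *
  * \note For example, given "myrsc:0" this returns a pointer to the final
  *       'c' in "myrsc", and given "myrsc" it returns a pointer to that
  *       same character.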
  */
 const char *
 pe_base_name_end(const char *id)
 {
     if (!pcmk__str_empty(id)) {
         const char *end = id + strlen(id) - 1;
 
         for (const char *s = end; s > id; --s) {
             switch (*s) {
                 case '0':
                 case '1':
                 case '2':
                 case '3':
                 case '4':
                 case '5':
                 case '6':
                 case '7':
                 case '8':
                 case '9':
                     break;
                 case ':':
                     return (s == end)? s : (s - 1);
                 default:
                     return end;
             }
         }
         return end;
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Get a resource name excluding any clone suffix
  *
  * \param[in] last_rsc_id  Resource ID to check
  *
  * \return Pointer to newly allocated string with resource's base name
  * \note It is the caller's responsibility to free() the result.
  *       This asserts on error, so callers can assume result is not NULL.
  */
 char *
 clone_strip(const char *last_rsc_id)
 {
     const char *end = pe_base_name_end(last_rsc_id);
     char *basename = NULL;
 
     CRM_ASSERT(end);
     basename = strndup(last_rsc_id, end - last_rsc_id + 1);
     CRM_ASSERT(basename);
     return basename;
 }
 
 /*!
  * \internal
  * \brief Get the name of the first instance of a cloned resource
  *
  * \param[in] last_rsc_id  Resource ID to check
  *
  * \return Pointer to newly allocated string with resource's base name plus :0
  * \note It is the caller's responsibility to free() the result.
  *       This asserts on error, so callers can assume result is not NULL.
  */
 char *
 clone_zero(const char *last_rsc_id)
 {
     const char *end = pe_base_name_end(last_rsc_id);
     size_t base_name_len = end - last_rsc_id + 1;
     char *zero = NULL;
 
     CRM_ASSERT(end);
     zero = calloc(base_name_len + 3, sizeof(char));
     CRM_ASSERT(zero);
     memcpy(zero, last_rsc_id, base_name_len);
     zero[base_name_len] = ':';
     zero[base_name_len + 1] = '0';
     return zero;
 }
 
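 /*!
  * \internal
  * \brief Create a dummy resource for an orphaned history entry
  *
  * When the status section contains history for a resource that is no
  * longer configured, build a minimal resource object from the history
  * entry so the orphan can be stopped (or ignored) cleanly.
  */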
 static pe_resource_t *
 create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
 {
     pe_resource_t *rsc = NULL;
     xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     copy_in_properties(xml_rsc, rsc_entry);
     crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
     crm_log_xml_debug(xml_rsc, "Orphan resource");
 
     if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) {
         return NULL;
     }
 
     if (xml_contains_remote_node(xml_rsc)) {
         pe_node_t *node;
 
         crm_debug("Detected orphaned remote node %s", rsc_id);
         node = pe_find_node(data_set->nodes, rsc_id);
         if (node == NULL) {
             node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
         }
         link_rsc2remotenode(data_set, rsc);
 
         if (node) {
             crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
             node->details->shutdown = TRUE;
         }
     }
 
     if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
         /* This orphaned rsc needs to be mapped to a container. */
         crm_trace("Detected orphaned container filler %s", rsc_id);
         pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
     }
     pe__set_resource_flags(rsc, pe_rsc_orphan);
     data_set->resources = g_list_append(data_set->resources, rsc);
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Create orphan instance for anonymous clone resource history
  */
 static pe_resource_t *
 create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
                         pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_resource_t *top = pe__create_clone_child(parent, data_set);
 
     // find_rsc() because we might be a cloned group
     pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
 
     pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
                  top->id, parent->id, rsc_id, node->details->uname);
     return orphan;
 }
 
 /*!
  * \internal
  * \brief Check a node for an instance of an anonymous clone
  *
  * Return a child instance of the specified anonymous clone, in order of
  * preference: (1) the instance running on the specified node, if any;
  * (2) an inactive instance (i.e. within the total of clone-max instances);
  * (3) a newly created orphan (i.e. clone-max instances are already active).
  *
  * \param[in] data_set  Cluster information
  * \param[in] node      Node on which to check for instance
  * \param[in] parent    Clone to check
  * \param[in] rsc_id    Name of cloned resource in history (without instance)
  */
 static pe_resource_t *
 find_anonymous_clone(pe_working_set_t * data_set, pe_node_t * node, pe_resource_t * parent,
                      const char *rsc_id)
 {
     GListPtr rIter = NULL;
     pe_resource_t *rsc = NULL;
     pe_resource_t *inactive_instance = NULL;
     gboolean skip_inactive = FALSE;
 
     CRM_ASSERT(parent != NULL);
     CRM_ASSERT(pe_rsc_is_clone(parent));
     CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
 
     // Check for active (or partially active, for cloned groups) instance
     pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id);
     for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
         GListPtr locations = NULL;
         pe_resource_t *child = rIter->data;
 
         /* Check whether this instance is already known to be active or pending
          * anywhere, at this stage of unpacking. Because this function is called
          * for a resource before the resource's individual operation history
          * entries are unpacked, locations will generally not contain the
          * desired node.
          *
          * However, there are three exceptions:
          * (1) when child is a cloned group and we have already unpacked the
          *     history of another member of the group on the same node;
          * (2) when we've already unpacked the history of another numbered
          *     instance on the same node (which can happen if globally-unique
          *     was flipped from true to false); and
          * (3) when we re-run calculations on the same data set as part of a
          *     simulation.
          */
         child->fns->location(child, &locations, 2);
         if (locations) {
             /* We should never associate the same numbered anonymous clone
              * instance with multiple nodes, and clone instances can't migrate,
              * so there must be only one location, regardless of history.
              */
             CRM_LOG_ASSERT(locations->next == NULL);
 
             if (((pe_node_t *)locations->data)->details == node->details) {
                 /* This child instance is active on the requested node, so check
                  * for a corresponding configured resource. We use find_rsc()
                  * instead of child because child may be a cloned group, and we
                  * need the particular member corresponding to rsc_id.
                  *
                  * If the history entry is orphaned, rsc will be NULL.
                  */
                 rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
                 if (rsc) {
                     /* If there are multiple instance history entries for an
                      * anonymous clone in a single node's history (which can
                      * happen if globally-unique is switched from true to
                      * false), we want to consider the instances beyond the
                      * first as orphans, even if there are inactive instance
                      * numbers available.
                      */
                     if (rsc->running_on) {
                         crm_notice("Active (now-)anonymous clone %s has "
                                    "multiple (orphan) instance histories on %s",
                                    parent->id, node->details->uname);
                         skip_inactive = TRUE;
                         rsc = NULL;
                     } else {
                         pe_rsc_trace(parent, "Resource %s, active", rsc->id);
                     }
                 }
             }
             g_list_free(locations);
 
         } else {
             pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
             if (!skip_inactive && !inactive_instance
                 && !pcmk_is_set(child->flags, pe_rsc_block)) {
                 // Remember one inactive instance in case we don't find active
                 inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
                                                           pe_find_clone);
 
                 /* ... but don't use it if it was already associated with a
                  * pending action on another node
                  */
                 if (inactive_instance && inactive_instance->pending_node
                     && (inactive_instance->pending_node->details != node->details)) {
                     inactive_instance = NULL;
                 }
             }
         }
     }
 
     if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
         pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id);
         rsc = inactive_instance;
     }
 
     /* If the resource has "requires" set to "quorum" or "nothing", and we don't
      * have a clone instance for every node, we don't want to consume a valid
      * instance number for unclean nodes. Such instances may appear to be active
      * according to the history, but should be considered inactive, so we can
      * start an instance elsewhere. Treat such instances as orphans.
      *
      * An exception is instances running on guest nodes -- since guest node
      * "fencing" is actually just a resource stop, requires shouldn't apply.
      *
      * @TODO Ideally, we'd use an inactive instance number if it is not needed
      * for any clean instances. However, we don't know that at this point.
      */
     if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
         && (!node->details->online || node->details->unclean)
         && !pe__is_guest_node(node)
         && !pe__is_universal_clone(parent, data_set)) {
 
         rsc = NULL;
     }
 
     if (rsc == NULL) {
         rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
         pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
     }
     return rsc;
 }
 
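 /*!
  * \internal
  * \brief Match a resource history ID to a resource object
  *
  * Look the ID up among configured resources, falling back to clone
  * instance matching for anonymous clones and bundles. May return NULL if
  * the history entry cannot be matched to any configured resource.
  */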
 static pe_resource_t *
 unpack_find_resource(pe_working_set_t * data_set, pe_node_t * node, const char *rsc_id,
                      xmlNode * rsc_entry)
 {
     pe_resource_t *rsc = NULL;
     pe_resource_t *parent = NULL;
 
     crm_trace("looking for %s", rsc_id);
     rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         /* If we didn't find the resource by its name in the operation history,
          * check it again as a clone instance. Even when clone-max=0, we create
          * a single :0 orphan to match against here.
          */
         char *clone0_id = clone_zero(rsc_id);
         pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
 
         if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
             rsc = clone0;
             parent = uber_parent(clone0);
             crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
         } else {
             crm_trace("%s is not known as %s either (orphan)",
                       rsc_id, clone0_id);
         }
         free(clone0_id);
 
     } else if (rsc->variant > pe_native) {
         crm_trace("Resource history for %s is orphaned because it is no longer primitive",
                   rsc_id);
         return NULL;
 
     } else {
         parent = uber_parent(rsc);
     }
 
     if (pe_rsc_is_anon_clone(parent)) {
 
         if (pe_rsc_is_bundled(parent)) {
             rsc = pe__find_bundle_replica(parent->parent, node);
         } else {
             char *base = clone_strip(rsc_id);
 
             rsc = find_anonymous_clone(data_set, node, parent, base);
             free(base);
             CRM_ASSERT(rsc != NULL);
         }
     }
 
     if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei)
         && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) {
 
         free(rsc->clone_name);
         rsc->clone_name = strdup(rsc_id);
         pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
                      rsc_id, node->details->uname, rsc->id,
                      (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
     }
     return rsc;
 }
 
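 /*!
  * \internal
  * \brief Create an orphan resource for an unmatched history entry
  *
  * Depending on whether the cluster is configured to stop orphaned
  * resources, the new orphan is either left unmanaged or banned from every
  * node so that it will be stopped.
  */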
 static pe_resource_t *
 process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_resource_t *rsc = NULL;
     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
 
     crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname);
     rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
 
     if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
 
     } else {
         CRM_CHECK(rsc != NULL, return NULL);
         pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
         resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
     }
     return rsc;
 }
 
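 /*!
  * \internal
  * \brief Update cluster state given a resource's known state on a node
  *
  * Record where the resource is known to be active, schedule fencing or
  * recovery as dictated by the on-fail handling in effect, and mark the
  * resource as running on the node where appropriate.
  */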
 static void
 process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
                   enum action_fail_response on_fail,
                   xmlNode * migrate_op, pe_working_set_t * data_set)
 {
     pe_node_t *tmpnode = NULL;
     char *reason = NULL;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
                  rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail));
 
     /* process current state */
     if (rsc->role != RSC_ROLE_UNKNOWN) {
         pe_resource_t *iter = rsc;
 
         while (iter) {
             if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
                 pe_node_t *n = pe__copy_node(node);
 
                 pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name,
                              n->details->uname);
                 g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
             }
             if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
                 break;
             }
             iter = iter->parent;
         }
     }
 
     /* If a managed resource is believed to be running, but node is down ... */
     if (rsc->role > RSC_ROLE_STOPPED
         && node->details->online == FALSE
         && node->details->maintenance == FALSE
         && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 
         gboolean should_fence = FALSE;
 
         /* If this is a guest node, fence it (regardless of whether fencing is
          * enabled, because guest node fencing is done by recovery of the
          * container resource rather than by the fencer). Mark the resource
          * we're processing as failed. When the guest comes back up, its
          * operation history in the CIB will be cleared, freeing the affected
          * resource to run again once we are sure we know its state.
          */
         if (pe__is_guest_node(node)) {
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             should_fence = TRUE;
 
         } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             if (pe__is_remote_node(node) && node->details->remote_rsc
                 && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
 
                 /* Setting unseen means that fencing of the remote node will
                  * occur only if the connection resource is not going to start
                  * somewhere. This allows connection resources on a failed
                  * cluster node to move to another node without requiring the
                  * remote nodes to be fenced as well.
                  */
                 node->details->unseen = TRUE;
                 reason = crm_strdup_printf("%s is active there (fencing will be"
                                            " revoked if remote connection can "
                                            "be re-established elsewhere)",
                                            rsc->id);
             }
             should_fence = TRUE;
         }
 
         if (should_fence) {
             if (reason == NULL) {
                reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
             }
             pe_fence_node(data_set, node, reason, FALSE);
         }
         free(reason);
     }
 
     if (node->details->unclean) {
         /* No extra processing needed
          * Also allows resources to be started again after a node is shot
          */
         on_fail = action_fail_ignore;
     }
 
     switch (on_fail) {
         case action_fail_ignore:
             /* nothing to do */
             break;
 
         case action_fail_demote:
             pe__set_resource_flags(rsc, pe_rsc_failed);
             demote_action(rsc, node, FALSE);
             break;
 
         case action_fail_fence:
             /* treat it as if it is still running
              * but also mark the node as unclean
              */
             reason = crm_strdup_printf("%s failed there", rsc->id);
             pe_fence_node(data_set, node, reason, FALSE);
             free(reason);
             break;
 
         case action_fail_standby:
             node->details->standby = TRUE;
             node->details->standby_onfail = TRUE;
             break;
 
         case action_fail_block:
             /* is_managed == FALSE will prevent any
              * actions being sent for the resource
              */
             pe__clear_resource_flags(rsc, pe_rsc_managed);
             pe__set_resource_flags(rsc, pe_rsc_block);
             break;
 
         case action_fail_migrate:
             /* make sure it comes up somewhere else
              * or not at all
              */
             resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
             break;
 
         case action_fail_stop:
             rsc->next_role = RSC_ROLE_STOPPED;
             break;
 
         case action_fail_recover:
             if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
                 pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
                 stop_action(rsc, node, FALSE);
             }
             break;
 
         case action_fail_restart_container:
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             if (rsc->container && pe_rsc_is_bundled(rsc)) {
                 /* A bundle's remote connection can run on a different node than
                  * the bundle's container. We don't necessarily know where the
                  * container is running yet, so remember it and add a stop
                  * action for it later.
                  */
                 data_set->stop_needed = g_list_prepend(data_set->stop_needed,
                                                        rsc->container);
             } else if (rsc->container) {
                 stop_action(rsc->container, node, FALSE);
             } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
                 stop_action(rsc, node, FALSE);
             }
             break;
 
         case action_fail_reset_remote:
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 tmpnode = NULL;
                 if (rsc->is_remote_node) {
                     tmpnode = pe_find_node(data_set->nodes, rsc->id);
                 }
                 if (tmpnode &&
                     pe__is_remote_node(tmpnode) &&
                     tmpnode->details->remote_was_fenced == 0) {
 
                     /* The remote connection resource failed in a way that
                      * should result in fencing the remote node.
                      */
                     pe_fence_node(data_set, tmpnode,
                                   "remote connection is unrecoverable", FALSE);
                 }
             }
 
             /* Require the stop action regardless of whether fencing is
              * occurring.
              */
             if (rsc->role > RSC_ROLE_STOPPED) {
                 stop_action(rsc, node, FALSE);
             }
 
             /* If reconnect delay is in use, prevent the connection from
              * exiting the "STOPPED" role until the failure is cleared by the
              * delay timeout.
              */
             if (rsc->remote_reconnect_ms) {
                 rsc->next_role = RSC_ROLE_STOPPED;
             }
             break;
     }
 
     /* Ensure that a remote node connection failure forces an unclean remote
      * node to be fenced. By setting unseen = FALSE, the remote node failure
      * will result in a fencing operation regardless of whether we're going
      * to attempt to reconnect to the remote node in this transition.
      */
     if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
         tmpnode = pe_find_node(data_set->nodes, rsc->id);
         if (tmpnode && tmpnode->details->unclean) {
             tmpnode->details->unseen = FALSE;
         }
     }
 
     if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
         if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                 pcmk__config_warn("Detected active orphan %s running on %s",
                                   rsc->id, node->details->uname);
             } else {
                 pcmk__config_warn("Resource '%s' must be stopped manually on "
                                   "%s because cluster is configured not to "
                                   "stop active orphans",
                                   rsc->id, node->details->uname);
             }
         }
 
         native_add_running(rsc, node, data_set);
         switch (on_fail) {
             case action_fail_ignore:
                 break;
             case action_fail_demote:
             case action_fail_block:
                 pe__set_resource_flags(rsc, pe_rsc_failed);
                 break;
             default:
                 pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
                 break;
         }
 
     } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
         /* Only do this for older status sections that included instance
          * numbers; otherwise stopped instances will appear as orphans.
          */
         pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
         free(rsc->clone_name);
         rsc->clone_name = NULL;
 
     } else {
         GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
                                                        FALSE);
         GListPtr gIter = possible_matches;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_action_t *stop = (pe_action_t *) gIter->data;
 
             pe__set_action_flags(stop, pe_action_optional);
         }
 
         g_list_free(possible_matches);
     }
 }
 
 /* create active recurring operations as optional */
 static void
 process_recurring(pe_node_t * node, pe_resource_t * rsc,
                   int start_index, int stop_index,
                   GListPtr sorted_op_list, pe_working_set_t * data_set)
 {
     int counter = -1;
     const char *task = NULL;
     const char *status = NULL;
     GListPtr gIter = sorted_op_list;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);
 
     for (; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         guint interval_ms = 0;
         char *key = NULL;
         const char *id = ID(rsc_op);
 
         counter++;
 
         if (node->details->online == FALSE) {
             pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname);
             break;
 
             /* Need to check if there's a monitor for role="Stopped" */
         } else if (start_index < stop_index && counter <= stop_index) {
             pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname);
             continue;
 
         } else if (counter < start_index) {
             pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter);
             continue;
         }
 
         crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
         if (interval_ms == 0) {
             pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname);
             continue;
         }
 
         status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
         if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
             pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname);
             continue;
         }
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         /* create the action */
         key = pcmk__op_key(rsc->id, task, interval_ms);
         pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname);
         custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
     }
 }
 
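 /*!
  * \internal
  * \brief Find the bounds of a resource's active period in its history
  *
  * Given a resource's operation history sorted by call ID, record the
  * indices of the last successful stop and the last start (or an action
  * implying a start, such as a successful probe or a promote/demote), so
  * that later processing can ignore operations predating the current
  * activation.
  *
  * \param[in]  sorted_op_list  Operation history, sorted by call ID
  * \param[out] start_index     Where to store index of last start
  * \param[out] stop_index      Where to store index of last successful stop
  */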
 void
 calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index)
 {
     int counter = -1;
     int implied_monitor_start = -1;
     int implied_clone_start = -1;
     const char *task = NULL;
     const char *status = NULL;
     GListPtr gIter = sorted_op_list;
 
     *stop_index = -1;
     *start_index = -1;
 
     for (; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         counter++;
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
 
         if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
             && pcmk__str_eq(status, "0", pcmk__str_casei)) {
             *stop_index = counter;
 
         } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
             *start_index = counter;
 
         } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
             const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
 
             if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
                 implied_monitor_start = counter;
             }
         } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
             implied_clone_start = counter;
         }
     }
 
     if (*start_index == -1) {
         if (implied_clone_start != -1) {
             *start_index = implied_clone_start;
         } else if (implied_monitor_start != -1) {
             *start_index = implied_monitor_start;
         }
     }
 }
 
 // If resource history entry has shutdown lock, remember lock node and time
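 // As an illustrative sketch (the attribute name here is inferred from the
 // constant and may differ), a locked entry would look like:
 //   <lrm_resource id="myrsc" ... shutdown-lock="1590000000">
 // where the value is the epoch second at which the node shut down.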
 static void
 unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node,
                      pe_working_set_t *data_set)
 {
     time_t lock_time = 0;   // When lock started (i.e. node shutdown time)
 
     if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
                                  &lock_time) == pcmk_ok) && (lock_time != 0)) {
 
         if ((data_set->shutdown_lock > 0)
             && (get_effective_time(data_set)
                 > (lock_time + data_set->shutdown_lock))) {
             pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
                         rsc->id, node->details->uname);
             pe__clear_resource_history(rsc, node, data_set);
         } else {
             rsc->lock_node = node;
             rsc->lock_time = lock_time;
         }
     }
 }
 
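 /*!
  * \internal
  * \brief Unpack one lrm_resource entry from a node's resource history
  *
  * Collect the entry's operations, match (or create) the corresponding
  * resource object, replay the operations in call ID order to determine
  * the resource's state, and reconcile the result with any requested
  * target role.
  *
  * \return Resource object for the entry, or NULL if there was nothing to do
  */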
 static pe_resource_t *
 unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int stop_index = -1;
     int start_index = -1;
     enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
 
     const char *task = NULL;
     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
 
     pe_resource_t *rsc = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
 
     xmlNode *migrate_op = NULL;
     xmlNode *rsc_op = NULL;
     xmlNode *last_failure = NULL;
 
     enum action_fail_response on_fail = action_fail_ignore;
     enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
 
     crm_trace("[%s] Processing %s on %s",
               crm_element_name(rsc_entry), rsc_id, node->details->uname);
 
     /* extract operations */
     op_list = NULL;
     sorted_op_list = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
                          pcmk__str_none)) {
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         }
     }
 
     /* find the resource */
     rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry);
     if (rsc == NULL) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         } else {
             rsc = process_orphan_resource(rsc_entry, node, data_set);
         }
     }
     CRM_ASSERT(rsc != NULL);
 
     // Check whether the resource is "shutdown-locked" to this node
     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         unpack_shutdown_lock(rsc_entry, rsc, node, data_set);
     }
 
     /* process operations */
     saved_role = rsc->role;
     rsc->role = RSC_ROLE_UNKNOWN;
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
             migrate_op = rsc_op;
         }
 
         unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
     }
 
     /* create active recurring operations as optional */
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
     process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
 
     /* no need to free the contents */
     g_list_free(sorted_op_list);
 
     process_rsc_state(rsc, node, on_fail, migrate_op, data_set);
 
     if (get_target_role(rsc, &req_role)) {
         if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
             pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s"
                          " with requested next role %s",
                          rsc->id, role2text(rsc->next_role), role2text(req_role));
             rsc->next_role = req_role;
 
         } else if (req_role > rsc->next_role) {
             pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s"
                         " with requested next role %s",
                         rsc->id, role2text(rsc->next_role), role2text(req_role));
         }
     }
 
     if (saved_role > rsc->role) {
         rsc->role = saved_role;
     }
 
     return rsc;
 }
 
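 /*!
  * \internal
  * \brief Map orphaned container fillers to their container resources
  */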
 static void
 handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
 {
     xmlNode *rsc_entry = NULL;
     for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL;
          rsc_entry = pcmk__xe_next(rsc_entry)) {
 
         pe_resource_t *rsc;
         pe_resource_t *container;
         const char *rsc_id;
         const char *container_id;
 
         if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) {
             continue;
         }
 
         container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER);
         rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
         if (container_id == NULL || rsc_id == NULL) {
             continue;
         }
 
         container = pe_find_resource(data_set->resources, container_id);
         if (container == NULL) {
             continue;
         }
 
         rsc = pe_find_resource(data_set->resources, rsc_id);
         if (rsc == NULL ||
             !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
             rsc->container != NULL) {
             continue;
         }
 
         pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
                      rsc->id, container_id);
         rsc->container = container;
         container->fillers = g_list_append(container->fillers, rsc);
     }
 }
 
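 /*!
  * \internal
  * \brief Unpack a node's lrm_resources section
  *
  * Unpack each lrm_resource history entry for the node, then map any
  * orphaned container fillers found along the way to their containers.
  */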
 static void
 unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_rsc_list,
                      pe_working_set_t *data_set)
 {
     xmlNode *rsc_entry = NULL;
     gboolean found_orphaned_container_filler = FALSE;
 
     for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL;
          rsc_entry = pcmk__xe_next(rsc_entry)) {
 
         if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
             pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set);
             if (!rsc) {
                 continue;
             }
             if (pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
                 found_orphaned_container_filler = TRUE;
             }
         }
     }
 
     /* Now that all resource state has been unpacked for this node, go back
      * and map any orphaned container fillers to their container resource.
      */
     if (found_orphaned_container_filler) {
         handle_orphaned_container_fillers(lrm_rsc_list, data_set);
     }
 }
 
 static void
 set_active(pe_resource_t * rsc)
 {
     pe_resource_t *top = uber_parent(rsc);
 
     if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
         rsc->role = RSC_ROLE_SLAVE;
     } else {
         rsc->role = RSC_ROLE_STARTED;
     }
 }
 
 static void
 set_node_score(gpointer key, gpointer value, gpointer user_data)
 {
     pe_node_t *node = value;
     int *score = user_data;
 
     node->weight = *score;
 }
 
 #define STATUS_PATH_MAX 1024
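 /*!
  * \internal
  * \brief Find a resource operation entry in the CIB status via XPath
  *
  * Build an XPath query for the given resource, operation, and node (and,
  * for migration operations, the peer node), and return the matching
  * lrm_rsc_op entry, optionally requiring that it completed successfully.
  */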
 static xmlNode *
 find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
             bool success_only, pe_working_set_t *data_set)
 {
     int offset = 0;
     char xpath[STATUS_PATH_MAX];
     xmlNode *xml = NULL;
 
     offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node);
     offset +=
         snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']",
                  resource);
 
     /* Need to check against transition_magic too? */
     if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         offset +=
             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op,
                      source);
     } else if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         offset +=
             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op,
                      source);
     } else {
         offset +=
             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op);
     }
 
     CRM_LOG_ASSERT(offset > 0);
     xml = get_xpath_object(xpath, data_set->input, LOG_DEBUG);
 
     if (xml && success_only) {
         int rc = PCMK_OCF_UNKNOWN_ERROR;
         int status = PCMK_LRM_OP_ERROR;
 
         crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc);
         crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status);
         if ((rc != PCMK_OCF_OK) || (status != PCMK_LRM_OP_DONE)) {
             return NULL;
         }
     }
     return xml;
 }
 
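 // Get the call ID recorded in an operation history entry (or 0 if none)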
 static int
 pe__call_id(xmlNode *op_xml)
 {
     int id = 0;
 
     if (op_xml) {
         crm_element_value_int(op_xml, XML_LRM_ATTR_CALLID, &id);
     }
     return id;
 }
 
 /*!
  * \brief Check whether a stop happened on the same node after some event
  *
  * \param[in] rsc       Resource being checked
  * \param[in] node      Node being checked
  * \param[in] xml_op    Event that stop is being compared to
  * \param[in] data_set  Cluster working set
  *
  * \return TRUE if stop happened after event, FALSE otherwise
  *
  * \note This is really unnecessary, but kept as a safety mechanism. We
  *       currently don't save more than one successful event in history, so this
  *       only matters when processing really old CIB files that we don't
  *       technically support anymore, or as preparation for logging an extended
  *       history in the future.
  */
 static bool
 stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                     pe_working_set_t *data_set)
 {
     xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP,
                                    node->details->uname, NULL, TRUE, data_set);
 
     return (stop_op && (pe__call_id(stop_op) > pe__call_id(xml_op)));
 }
 
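 /*!
  * \internal
  * \brief Update resource state given a successful migrate_to action
  */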
 static void
 unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           pe_working_set_t *data_set)
 {
     /* A successful migration sequence is:
      *    migrate_to on source node
      *    migrate_from on target node
      *    stop on source node
      *
      * If a migrate_to is followed by a stop, the entire migration (successful
      * or failed) is complete, and we don't care what happened on the target.
      *
      * If no migrate_from has happened, the migration is considered to be
      * "partial". If the migrate_from failed, make sure the resource gets
      * stopped on both source and target (if up).
      *
      * If the migrate_to and migrate_from both succeeded (which also implies the
      * resource is no longer running on the source), but there is no stop, the
      * migration is considered to be "dangling". Schedule a stop on the source
      * in this case.
      */
     int from_rc = 0;
     int from_status = 0;
     pe_node_t *target_node = NULL;
     pe_node_t *source_node = NULL;
     xmlNode *migrate_from = NULL;
     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
 
     // Sanity check
     CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
 
     if (stop_happened_after(rsc, node, xml_op, data_set)) {
         return;
     }
 
     // Clones are not allowed to migrate, so role can't be master
     rsc->role = RSC_ROLE_STARTED;
 
     target_node = pe_find_node(data_set->nodes, target);
     source_node = pe_find_node(data_set->nodes, source);
 
     // Check whether there was a migrate_from action on the target
     migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
                                source, FALSE, data_set);
     if (migrate_from) {
         crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
         crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
         pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d",
                      ID(migrate_from), target, from_status, from_rc);
     }
 
     if (migrate_from && from_rc == PCMK_OCF_OK
         && from_status == PCMK_LRM_OP_DONE) {
         /* The migrate_to and migrate_from both succeeded, so mark the migration
          * as "dangling". This will be used to schedule a stop action on the
          * source without affecting the target.
          */
         pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op),
                      source);
         rsc->role = RSC_ROLE_STOPPED;
         rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
 
     } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed
         if (target_node && target_node->details->online) {
             pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
                          target_node->details->online);
             native_add_running(rsc, target_node, data_set);
         }
 
     } else { // Pending, or complete but erased
         if (target_node && target_node->details->online) {
             pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
                          target_node->details->online);
 
             native_add_running(rsc, target_node, data_set);
             if (source_node && source_node->details->online) {
                 /* This is a partial migration: the migrate_to completed
                  * successfully on the source, but the migrate_from has not
                  * completed. Remember the source and target; if the newly
                  * chosen target remains the same when we schedule actions
                  * later, we may continue with the migration.
                  */
                 rsc->partial_migration_target = target_node;
                 rsc->partial_migration_source = source_node;
             }
         } else {
             /* Consider it failed here - forces a restart, prevents migration */
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
         }
     }
 }
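 
 /* Editorial sketch (hypothetical, not part of this patch): the migration
  * outcomes described in the comment at the top of unpack_migrate_to_success()
  * can be summarized as a pure classification of the facts the function
  * gathers, separate from the state updates it interleaves with them:
  */
 #if 0
 enum migration_outcome {
     migration_complete,  /* a stop followed the migrate_to: nothing to do   */
     migration_dangling,  /* migrate_from succeeded but no stop: stop source */
     migration_failed,    /* migrate_from failed: recover on both ends       */
     migration_partial,   /* no migrate_from yet: may continue migration     */
 };

 static enum migration_outcome
 classify_migration(bool stopped_after, xmlNode *migrate_from,
                    int from_rc, int from_status)
 {
     if (stopped_after) {
         return migration_complete;
     }
     if (migrate_from && (from_rc == PCMK_OCF_OK)
         && (from_status == PCMK_LRM_OP_DONE)) {
         return migration_dangling;
     }
     if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) {
         return migration_failed;
     }
     return migration_partial;
 }
 #endif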
 
 // Is there an action_name in node_name's rsc history newer than call_id?
 static bool
 newer_op(pe_resource_t *rsc, const char *action_name, const char *node_name,
          int call_id, pe_working_set_t *data_set)
 {
     xmlNode *action = find_lrm_op(rsc->id, action_name, node_name, NULL, TRUE,
                                   data_set);
 
     return pe__call_id(action) > call_id;
 }
 
 static void
 unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           pe_working_set_t *data_set)
 {
     int target_stop_id = 0;
     int target_migrate_from_id = 0;
     xmlNode *target_stop = NULL;
     xmlNode *target_migrate_from = NULL;
     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
 
     // Sanity check
     CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be master.
      */
     rsc->role = RSC_ROLE_STARTED;
 
     // Check for stop on the target
     target_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, target, NULL,
                               TRUE, data_set);
     target_stop_id = pe__call_id(target_stop);
 
     // Check for migrate_from on the target
     target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
                                       source, TRUE, data_set);
     target_migrate_from_id = pe__call_id(target_migrate_from);
 
     if ((target_stop == NULL) || (target_stop_id < target_migrate_from_id)) {
         /* There was no stop on the target, or a stop that happened before a
          * migrate_from, so assume the resource is still active on the target
          * (if it is up).
          */
         pe_node_t *target_node = pe_find_node(data_set->nodes, target);
 
         pe_rsc_trace(rsc, "stop (%d) + migrate_from (%d)",
                      target_stop_id, target_migrate_from_id);
         if (target_node && target_node->details->online) {
             native_add_running(rsc, target_node, data_set);
         }
 
     } else if (target_migrate_from == NULL) {
         /* We know there was a stop on the target, but there may not have been a
          * migrate_from (the stop could have happened before migrate_from was
          * scheduled or attempted).
          *
          * That means this could be a "dangling" migration. But first, check
          * whether there is a newer successful stop, start, or migrate_from on
          * the source node -- it's possible the failed migration was followed by
          * a successful stop, full restart, or migration in the reverse
          * direction, in which case we don't want to force a stop.
          */
         int source_migrate_to_id = pe__call_id(xml_op);
 
         if (newer_op(rsc, CRMD_ACTION_MIGRATED, source, source_migrate_to_id,
                      data_set)
             || newer_op(rsc, CRMD_ACTION_START, source, source_migrate_to_id,
                      data_set)
             || newer_op(rsc, CRMD_ACTION_STOP, source, source_migrate_to_id,
                      data_set)) {
             return;
         }
 
         // Mark node as having dangling migration so we can force a stop later
         rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
     }
 }
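 
 /* Editorial aside (illustrative, not part of this patch): the newer_op()
  * checks above guard against sequences like a failed migration that was
  * later reversed. For example, in the source node's history (call IDs in
  * parentheses):
  *
  *   migrate_to node1 -> node2   (12, failed)
  *   ... stop recorded on node2 ...
  *   migrate_from onto node1     (15, successful reverse migration)
  *
  * The stop on the target would otherwise make this look like a dangling
  * migration, but the newer migrate_from on the source shows the resource
  * was recovered there, so no forced stop is scheduled.
  */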
 
 static void
 unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node,
                             xmlNode *xml_op, pe_working_set_t *data_set)
 {
     xmlNode *source_stop = NULL;
     xmlNode *source_migrate_to = NULL;
     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
 
     // Sanity check
     CRM_CHECK(source && target && !strcmp(target, node->details->uname), return);
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be master.
      */
     rsc->role = RSC_ROLE_STARTED;
 
     // Check for a stop on the source
     source_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, source, NULL,
                               TRUE, data_set);
 
     // Check for a migrate_to on the source
     source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE,
                                     source, target, TRUE, data_set);
 
     if ((source_stop == NULL)
         || (pe__call_id(source_stop) < pe__call_id(source_migrate_to))) {
         /* There was no stop on the source, or a stop that happened before
          * migrate_to, so assume the resource is still active on the source (if
          * it is up).
          */
         pe_node_t *source_node = pe_find_node(data_set->nodes, source);
 
         if (source_node && source_node->details->online) {
             native_add_running(rsc, source_node, data_set);
         }
     }
 }
 
 static void
 record_failed_op(xmlNode *op, const pe_node_t *node,
                  const pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     xmlNode *xIter = NULL;
     const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY);
 
     if (node->details->online == FALSE) {
         return;
     }
 
     for (xIter = data_set->failed->children; xIter; xIter = xIter->next) {
         const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY);
         const char *uname = crm_element_value(xIter, XML_ATTR_UNAME);
 
         if(pcmk__str_eq(op_key, key, pcmk__str_casei) && pcmk__str_eq(uname, node->details->uname, pcmk__str_casei)) {
             crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname);
             return;
         }
     }
 
     crm_trace("Adding entry %s on %s", op_key, node->details->uname);
     crm_xml_add(op, XML_ATTR_UNAME, node->details->uname);
     crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id);
     add_node_copy(data_set->failed, op);
 }
 
 static const char *get_op_key(xmlNode *xml_op)
 {
     const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     if(key == NULL) {
         key = ID(xml_op);
     }
     return key;
 }
 
 static const char *
 last_change_str(xmlNode *xml_op)
 {
     time_t when;
     const char *when_s = NULL;
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &when) == pcmk_ok) {
         when_s = pcmk__epoch2str(&when);
         if (when_s) {
             // Skip day of week to make message shorter
             when_s = strchr(when_s, ' ');
             if (when_s) {
                 ++when_s;
             }
         }
     }
     return ((when_s && *when_s)? when_s : "unknown time");
 }
 
 /*!
  * \internal
  * \brief Compare two on-fail values
  *
  * \param[in] first   One on-fail value to compare
  * \param[in] second  The other on-fail value to compare
  *
  * \return A negative number if second is more severe than first, zero if they
  *         are equal, or a positive number if first is more severe than second.
  * \note This is only needed until the action_fail_response values can be
  *       renumbered at the next API compatibility break.
  */
 static int
 cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
 {
     switch (first) {
         case action_fail_demote:
             switch (second) {
                 case action_fail_ignore:
                     return 1;
                 case action_fail_demote:
                     return 0;
                 default:
                     return -1;
             }
             break;
 
         case action_fail_reset_remote:
             switch (second) {
                 case action_fail_ignore:
                 case action_fail_demote:
                 case action_fail_recover:
                     return 1;
                 case action_fail_reset_remote:
                     return 0;
                 default:
                     return -1;
             }
             break;
 
         case action_fail_restart_container:
             switch (second) {
                 case action_fail_ignore:
                 case action_fail_demote:
                 case action_fail_recover:
                 case action_fail_reset_remote:
                     return 1;
                 case action_fail_restart_container:
                     return 0;
                 default:
                     return -1;
             }
             break;
 
         default:
             break;
     }
     switch (second) {
         case action_fail_demote:
             return (first == action_fail_ignore)? -1 : 1;
 
         case action_fail_reset_remote:
             switch (first) {
                 case action_fail_ignore:
                 case action_fail_demote:
                 case action_fail_recover:
                     return -1;
                 default:
                     return 1;
             }
             break;
 
         case action_fail_restart_container:
             switch (first) {
                 case action_fail_ignore:
                 case action_fail_demote:
                 case action_fail_recover:
                 case action_fail_reset_remote:
                     return -1;
                 default:
                     return 1;
             }
             break;
 
         default:
             break;
     }
     return first - second;
 }
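 
 /* Editorial sketch (not part of this patch): the effective severity order
  * implemented above is
  *
  *   ignore < demote < recover < reset_remote < restart_container < others,
  *
  * with the remaining values ordered by their numeric enum value. A minimal
  * self-check expressing that ordering:
  */
 #if 0
 static void
 cmp_on_fail_self_check(void)
 {
     CRM_ASSERT(cmp_on_fail(action_fail_ignore, action_fail_demote) < 0);
     CRM_ASSERT(cmp_on_fail(action_fail_demote, action_fail_recover) < 0);
     CRM_ASSERT(cmp_on_fail(action_fail_recover,
                            action_fail_reset_remote) < 0);
     CRM_ASSERT(cmp_on_fail(action_fail_reset_remote,
                            action_fail_restart_container) < 0);
     CRM_ASSERT(cmp_on_fail(action_fail_demote, action_fail_demote) == 0);
 }
 #endif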
 
 static void
 unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure,
                       enum action_fail_response * on_fail, pe_working_set_t * data_set)
 {
     guint interval_ms = 0;
     bool is_probe = false;
     pe_action_t *action = NULL;
 
     const char *key = get_op_key(xml_op);
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *exit_reason = crm_element_value(xml_op,
                                                 XML_LRM_ATTR_EXIT_REASON);
 
     CRM_ASSERT(rsc);
     CRM_CHECK(task != NULL, return);
 
     *last_failure = xml_op;
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) {
         is_probe = true;
     }
 
     if (exit_reason == NULL) {
         exit_reason = "";
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)
         && (rc == PCMK_OCF_NOT_INSTALLED)) {
         crm_trace("Unexpected result (%s%s%s) was recorded for "
                   "%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
                   services_ocf_exitcode_str(rc),
                   (*exit_reason? ": " : ""), exit_reason,
                   (is_probe? "probe" : task), rsc->id, node->details->uname,
                   last_change_str(xml_op), rc, ID(xml_op));
     } else {
         crm_warn("Unexpected result (%s%s%s) was recorded for "
                   "%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
                  services_ocf_exitcode_str(rc),
                  (*exit_reason? ": " : ""), exit_reason,
                  (is_probe? "probe" : task), rsc->id, node->details->uname,
                  last_change_str(xml_op), rc, ID(xml_op));
 
         if (is_probe && (rc != PCMK_OCF_OK)
             && (rc != PCMK_OCF_NOT_RUNNING)
             && (rc != PCMK_OCF_RUNNING_MASTER)) {
 
             /* A failed (not just unexpected) probe result could mean the user
              * didn't know that resources are probed even on nodes where they
              * can't run.
              */
             crm_notice("If it is not possible for %s to run on %s, see "
                        "the resource-discovery option for location constraints",
                        rsc->id, node->details->uname);
         }
 
         record_failed_op(xml_op, node, rsc, data_set);
     }
 
     action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
     if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
         pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail),
                      fail2text(action->on_fail), action->uuid, key);
         *on_fail = action->on_fail;
     }
 
     if (!strcmp(task, CRMD_ACTION_STOP)) {
         resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set);
 
     } else if (!strcmp(task, CRMD_ACTION_MIGRATE)) {
         unpack_migrate_to_failure(rsc, node, xml_op, data_set);
 
     } else if (!strcmp(task, CRMD_ACTION_MIGRATED)) {
         unpack_migrate_from_failure(rsc, node, xml_op, data_set);
 
     } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
         rsc->role = RSC_ROLE_MASTER;
 
     } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) {
         if (action->on_fail == action_fail_block) {
             rsc->role = RSC_ROLE_MASTER;
             rsc->next_role = RSC_ROLE_STOPPED;
 
         } else if(rc == PCMK_OCF_NOT_RUNNING) {
             rsc->role = RSC_ROLE_STOPPED;
 
         } else {
             /* Staying in master role would put the scheduler and controller
              * into a loop. Setting slave role is not dangerous because the
              * resource will be stopped as part of recovery, and any master
              * promotion will be ordered after that stop.
              */
             rsc->role = RSC_ROLE_SLAVE;
         }
     }
 
     if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) {
         /* leave stopped */
         pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id);
         rsc->role = RSC_ROLE_STOPPED;
 
     } else if (rsc->role < RSC_ROLE_STARTED) {
         pe_rsc_trace(rsc, "Setting %s active", rsc->id);
         set_active(rsc);
     }
 
     pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
                  rsc->id, role2text(rsc->role),
                  pcmk__btoa(node->details->unclean),
                  fail2text(action->on_fail), role2text(action->fail_role));
 
     if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) {
         rsc->next_role = action->fail_role;
     }
 
     if (action->fail_role == RSC_ROLE_STOPPED) {
         int score = -INFINITY;
 
         pe_resource_t *fail_rsc = rsc;
 
         if (fail_rsc->parent) {
             pe_resource_t *parent = uber_parent(fail_rsc);
 
             if (pe_rsc_is_clone(parent)
                 && !pcmk_is_set(parent->flags, pe_rsc_unique)) {
                 /* For anonymous clones, if any instance fails an operation
                  * with on-fail=stop, the whole clone must be stopped. Do this
                  * by preventing the parent from starting again. */
                 fail_rsc = parent;
             }
         }
         crm_notice("%s will not be started under current conditions",
                    fail_rsc->id);
         /* make sure it doesn't come up again */
         if (fail_rsc->allowed_nodes != NULL) {
             g_hash_table_destroy(fail_rsc->allowed_nodes);
         }
         fail_rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
         g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
     }
 
     pe_free_action(action);
 }
 
 /*!
  * \internal
  * \brief Remap operation status based on action result
  *
  * Given an action result, determine an appropriate operation status for the
  * purposes of responding to the action (the status provided by the executor is
  * not directly usable since the executor does not know what was expected).
  *
  * \param[in,out] rsc        Resource that operation history entry is for
  * \param[in]     rc         Actual return code of operation
  * \param[in]     target_rc  Expected return code of operation
  * \param[in]     node       Node where operation was executed
  * \param[in]     xml_op     Operation history entry XML from CIB status
  * \param[in,out] on_fail    What should be done about the result
  * \param[in]     data_set   Current cluster working set
  *
  * \return Operation status based on return code and action info
  * \note This may update the resource's current and next role.
  */
 static int
 determine_op_status(pe_resource_t *rsc, int rc, int target_rc,
                     pe_node_t *node, xmlNode *xml_op,
                     enum action_fail_response *on_fail,
                     pe_working_set_t *data_set)
 {
     guint interval_ms = 0;
     bool is_probe = false;
     int result = PCMK_LRM_OP_DONE;
     const char *key = get_op_key(xml_op);
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *exit_reason = crm_element_value(xml_op,
                                                 XML_LRM_ATTR_EXIT_REASON);
 
     CRM_ASSERT(rsc);
     CRM_CHECK(task != NULL, return PCMK_LRM_OP_ERROR);
 
     if (exit_reason == NULL) {
         exit_reason = "";
     }
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) {
         is_probe = true;
         task = "probe";
     }
 
     if (target_rc < 0) {
         /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
          * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
          * target_rc in the transition key, which (along with the similar case
          * of a corrupted transition key in the CIB) will be reported to this
          * function as -1. Pacemaker 2.0+ supports neither rolling upgrades
          * from those versions nor processing of saved CIB files from them,
          * so we do not need to care much about this case.
          */
         result = PCMK_LRM_OP_ERROR;
         crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)",
                  key, node->details->uname);
 
     } else if (target_rc != rc) {
         result = PCMK_LRM_OP_ERROR;
         pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)",
                      key, node->details->uname,
                      target_rc, services_ocf_exitcode_str(target_rc),
                      rc, services_ocf_exitcode_str(rc),
                      (*exit_reason? ": " : ""), exit_reason);
     }
 
     switch (rc) {
         case PCMK_OCF_OK:
             if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
                 result = PCMK_LRM_OP_DONE;
                 pe_rsc_info(rsc, "Probe found %s active on %s at %s",
                             rsc->id, node->details->uname,
                             last_change_str(xml_op));
             }
             break;
 
         case PCMK_OCF_NOT_RUNNING:
             if (is_probe || (target_rc == rc)
                 || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 
                 result = PCMK_LRM_OP_DONE;
                 rsc->role = RSC_ROLE_STOPPED;
 
                 /* clear any previous failure actions */
                 *on_fail = action_fail_ignore;
                 rsc->next_role = RSC_ROLE_UNKNOWN;
             }
             break;
 
         case PCMK_OCF_RUNNING_MASTER:
             if (is_probe && (rc != target_rc)) {
                 result = PCMK_LRM_OP_DONE;
                 pe_rsc_info(rsc,
                             "Probe found %s active and promoted on %s at %s",
                             rsc->id, node->details->uname,
                             last_change_str(xml_op));
             }
             rsc->role = RSC_ROLE_MASTER;
             break;
 
         case PCMK_OCF_DEGRADED_MASTER:
         case PCMK_OCF_FAILED_MASTER:
             rsc->role = RSC_ROLE_MASTER;
             result = PCMK_LRM_OP_ERROR;
             break;
 
         case PCMK_OCF_NOT_CONFIGURED:
             result = PCMK_LRM_OP_ERROR_FATAL;
             break;
 
         case PCMK_OCF_UNIMPLEMENT_FEATURE:
             if (interval_ms > 0) {
                 result = PCMK_LRM_OP_NOTSUPPORTED;
                 break;
             }
             // fall through
         case PCMK_OCF_NOT_INSTALLED:
         case PCMK_OCF_INVALID_PARAM:
         case PCMK_OCF_INSUFFICIENT_PRIV:
             if (!pe_can_fence(data_set, node)
                 && !strcmp(task, CRMD_ACTION_STOP)) {
                 /* If a stop fails and we can't fence, there's nothing else we can do */
                 pe_proc_err("No further recovery can be attempted for %s "
                             "because %s on %s failed (%s%s%s) at %s "
                             CRM_XS " rc=%d id=%s", rsc->id, task,
                             node->details->uname, services_ocf_exitcode_str(rc),
                             (*exit_reason? ": " : ""), exit_reason,
                             last_change_str(xml_op), rc, ID(xml_op));
                 pe__clear_resource_flags(rsc, pe_rsc_managed);
                 pe__set_resource_flags(rsc, pe_rsc_block);
             }
             result = PCMK_LRM_OP_ERROR_HARD;
             break;
 
         default:
             if (result == PCMK_LRM_OP_DONE) {
                 crm_info("Treating unknown exit status %d from %s of %s "
                          "on %s at %s as failure",
                          rc, task, rsc->id, node->details->uname,
                          last_change_str(xml_op));
                 result = PCMK_LRM_OP_ERROR;
             }
             break;
     }
     return result;
 }
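 
 /* Editorial sketch (illustrative, not part of this patch): an example of
  * the remapping above, assuming xml_op describes a probe (a monitor with
  * interval 0). A probe that expected the resource to be running but found
  * it stopped is still a successful operation; only the role is updated:
  */
 #if 0
 static void
 determine_op_status_example(pe_resource_t *rsc, pe_node_t *node,
                             xmlNode *xml_op, pe_working_set_t *data_set)
 {
     enum action_fail_response on_fail = action_fail_ignore;
     int status = determine_op_status(rsc, PCMK_OCF_NOT_RUNNING, PCMK_OCF_OK,
                                      node, xml_op, &on_fail, data_set);

     CRM_ASSERT(status == PCMK_LRM_OP_DONE);
     CRM_ASSERT(rsc->role == RSC_ROLE_STOPPED);
 }
 #endif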
 
 // Return TRUE if this is a last failure of a start or monitor whose
 // resource parameters have changed since it ran
 static bool
 should_clear_for_param_change(xmlNode *xml_op, const char *task,
                               pe_resource_t *rsc, pe_node_t *node,
                               pe_working_set_t *data_set)
 {
     if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
 
-        if (pe__bundle_needs_remote_name(rsc)) {
+        if (pe__bundle_needs_remote_name(rsc, data_set)) {
             /* We haven't allocated resources yet, so we can't reliably
              * substitute addr parameters for the REMOTE_CONTAINER_HACK.
              * When that's needed, defer the check until later.
              */
             pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
                                 data_set);
 
         } else {
             op_digest_cache_t *digest_data = NULL;
 
             digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s"
                               " has no digest to compare",
                               rsc->id, get_op_key(xml_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     return TRUE;
             }
         }
     }
     return FALSE;
 }
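 
 /* Editorial aside (not part of this patch): the digest comparison above
  * works off hashes of the resource parameters recorded with each history
  * entry. An illustrative CIB attribute (name per Pacemaker's schema, value
  * hypothetical):
  *
  *   <lrm_rsc_op id="myrsc_last_0" operation="start"
  *               op-digest="873ed4f07792aa8ff18f3254244675ea" .../>
  *
  * rsc_action_digest_cmp() recomputes the digest from the current
  * configuration and returns RSC_DIGEST_MATCH only if it equals the stored
  * one; anything other than a match or "unknown" means the parameters
  * changed and the failure should be cleared.
  */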
 
 // Order action after fencing of remote node, given connection rsc
 static void
 order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
                            pe_working_set_t *data_set)
 {
     pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
 
     if (remote_node) {
         pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
                                          FALSE, data_set);
 
         order_actions(fence, action, pe_order_implies_then);
     }
 }
 
 static bool
 should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op,
                               const char *task, guint interval_ms,
                               bool is_last_failure, pe_working_set_t *data_set)
 {
     /* Clearing failures of recurring monitors has special concerns. The
      * executor reports only changes in the monitor result, so if the
      * monitor is still active and still getting the same failure result,
      * that will go undetected after the failure is cleared.
      *
      * Also, the operation history will have the time when the recurring
      * monitor result changed to the given code, not the time when the
      * result last happened.
      *
      * @TODO We probably should clear such failures only when the failure
      * timeout has passed since the last occurrence of the failed result.
      * However we don't record that information. We could maybe approximate
      * that by clearing only if there is a more recent successful monitor or
      * stop result, but we don't even have that information at this point
      * since we are still unpacking the resource's operation history.
      *
      * This is especially important for remote connection resources with a
      * reconnect interval, so in that case, we skip clearing failures
      * if the remote node hasn't been fenced.
      */
     if (rsc->remote_reconnect_ms
         && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
         && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
 
         pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
 
         if (remote_node && !remote_node->details->remote_was_fenced) {
             if (is_last_failure) {
                 crm_info("Waiting to clear monitor failure for remote node %s"
                          " until fencing has occurred", rsc->id);
             }
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Check operation age and schedule failure clearing when appropriate
  *
  * This function has two distinct purposes. The first is to check whether an
  * operation history entry is expired (i.e. the resource has a failure timeout,
  * the entry is older than the timeout, and the resource either has no fail
  * count or its fail count is entirely older than the timeout). The second is to
  * schedule fail count clearing when appropriate: either the operation is
  * expired and the resource has an expired fail count or the operation is a
  * last_failure for a remote connection resource with a reconnect interval,
  * or the operation is a last_failure for a start or monitor whose resource
  * parameters have changed since the operation ran.
  *
  * \param[in] rsc       Resource that operation happened to
  * \param[in] node      Node that operation happened on
  * \param[in] rc        Actual result of operation
  * \param[in] xml_op    Operation history entry XML
  * \param[in] data_set  Current working set
  *
  * \return TRUE if operation history entry is expired, FALSE otherwise
  */
 static bool
 check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc,
                        xmlNode *xml_op, pe_working_set_t *data_set)
 {
     bool expired = FALSE;
     bool is_last_failure = pcmk__ends_with(ID(xml_op), "_last_failure_0");
     time_t last_run = 0;
     guint interval_ms = 0;
     int unexpired_fail_count = 0;
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *clear_reason = NULL;
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
     if ((rsc->failure_timeout > 0)
         && (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                     &last_run) == 0)) {
 
         // Resource has a failure-timeout, and history entry has a timestamp
 
         time_t now = get_effective_time(data_set);
         time_t last_failure = 0;
 
         // Is this particular operation history older than the failure timeout?
         if ((now >= (last_run + rsc->failure_timeout))
             && !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms,
                                               is_last_failure, data_set)) {
             expired = TRUE;
         }
 
         // Does the resource as a whole have an unexpired fail count?
         unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure,
                                                 pe_fc_effective, xml_op,
                                                 data_set);
 
         // Update scheduler recheck time according to *last* failure
         crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
                   " last-failure@%lld",
                   ID(xml_op), (long long) last_run, (expired? "" : "not "),
                   (long long) now, unexpired_fail_count, rsc->failure_timeout,
                   (long long) last_failure);
         last_failure += rsc->failure_timeout + 1;
         if (unexpired_fail_count && (now < last_failure)) {
             pe__update_recheck_time(last_failure, data_set);
         }
     }
 
     if (expired) {
         if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) {
 
             // There is a fail count ignoring timeout
 
             if (unexpired_fail_count == 0) {
                 // There is no fail count considering timeout
                 clear_reason = "it expired";
 
             } else {
                 /* This operation is old, but there is an unexpired fail count.
                  * In a properly functioning cluster, this should only be
                  * possible if this operation is not a failure (otherwise the
                  * fail count should be expired too), so this is really just a
                  * failsafe.
                  */
                 expired = FALSE;
             }
 
         } else if (is_last_failure && rsc->remote_reconnect_ms) {
             /* Clear any expired last failure when reconnect interval is set,
              * even if there is no fail count.
              */
             clear_reason = "reconnect interval is set";
         }
     }
 
     if (!expired && is_last_failure
         && should_clear_for_param_change(xml_op, task, rsc, node, data_set)) {
         clear_reason = "resource parameters have changed";
     }
 
     if (clear_reason != NULL) {
         // Schedule clearing of the fail count
         pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason,
                                                     data_set);
 
         if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
             && rsc->remote_reconnect_ms) {
             /* If we're clearing a remote connection due to a reconnect
              * interval, we want to wait until any scheduled fencing
              * completes.
              *
              * We could limit this to remote_node->details->unclean, but at
              * this point, that's always true (it won't be reliable until
              * after unpack_node_loop() is done).
              */
             crm_info("Clearing %s failure will wait until any scheduled "
                      "fencing of %s completes", task, rsc->id);
             order_after_remote_fencing(clear_op, rsc, data_set);
         }
     }
 
     if (expired && (interval_ms == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         switch(rc) {
             case PCMK_OCF_OK:
             case PCMK_OCF_NOT_RUNNING:
             case PCMK_OCF_RUNNING_MASTER:
             case PCMK_OCF_DEGRADED:
             case PCMK_OCF_DEGRADED_MASTER:
                 // Don't expire probes that return these values
                 expired = FALSE;
                 break;
         }
     }
 
     return expired;
 }
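 
 /* Editorial sketch (not part of this patch): stripped of the recheck-time
  * and fail-count-clearing side effects (and of the probe special case at
  * the end), the expiry decision above reduces to roughly:
  */
 #if 0
 static bool
 is_expired(pe_resource_t *rsc, time_t now, time_t last_run,
            int unexpired_fail_count, bool ignore_timeout)
 {
     if ((rsc->failure_timeout <= 0)
         || (now < last_run + rsc->failure_timeout)
         || ignore_timeout) {
         return false;  /* no timeout configured, too recent, or special case */
     }
     /* Failsafe: an unexpired fail count keeps the entry unexpired */
     return (unexpired_fail_count == 0);
 }
 #endif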
 
 int pe__target_rc_from_xml(xmlNode *xml_op)
 {
     int target_rc = 0;
     const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
 
     if (key == NULL) {
         return -1;
     }
     decode_transition_key(key, NULL, NULL, NULL, &target_rc);
     return target_rc;
 }
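 
 /* Editorial aside (not part of this patch): the transition key decoded
  * above has the form "<action-id>:<transition-id>:<target-rc>:<uuid>", so
  * target_rc is its third field. A hypothetical history entry such as
  *
  *   <lrm_rsc_op ... transition-key="3:14:0:0c4f746a-..."/>
  *
  * would therefore yield a target_rc of 0 (PCMK_OCF_OK).
  */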
 
 static enum action_fail_response
 get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task,
                    pe_working_set_t *data_set)
 {
     enum action_fail_response result = action_fail_recover;
     pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
 
     result = action->on_fail;
     pe_free_action(action);
 
     return result;
 }
 
 static void
 update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, const char * task, int rc,
                       xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set)
 {
     gboolean clear_past_failure = FALSE;
 
     CRM_ASSERT(rsc);
     CRM_ASSERT(xml_op);
 
     if (rc == PCMK_OCF_NOT_RUNNING) {
         clear_past_failure = TRUE;
 
     } else if (rc == PCMK_OCF_NOT_INSTALLED) {
         rsc->role = RSC_ROLE_STOPPED;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         if (last_failure) {
             const char *op_key = get_op_key(xml_op);
             const char *last_failure_key = get_op_key(last_failure);
 
             if (pcmk__str_eq(op_key, last_failure_key, pcmk__str_casei)) {
                 clear_past_failure = TRUE;
             }
         }
 
         if (rsc->role < RSC_ROLE_STARTED) {
             set_active(rsc);
         }
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
         rsc->role = RSC_ROLE_STARTED;
         clear_past_failure = TRUE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         rsc->role = RSC_ROLE_STOPPED;
         clear_past_failure = TRUE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
         rsc->role = RSC_ROLE_MASTER;
         clear_past_failure = TRUE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
 
         if (*on_fail == action_fail_demote) {
             // Demote clears an error only if on-fail=demote
             clear_past_failure = TRUE;
         }
         rsc->role = RSC_ROLE_SLAVE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         rsc->role = RSC_ROLE_STARTED;
         clear_past_failure = TRUE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         unpack_migrate_to_success(rsc, node, xml_op, data_set);
 
     } else if (rsc->role < RSC_ROLE_STARTED) {
         pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname);
         set_active(rsc);
     }
 
     /* clear any previous failure actions */
     if (clear_past_failure) {
         switch (*on_fail) {
             case action_fail_stop:
             case action_fail_fence:
             case action_fail_migrate:
             case action_fail_standby:
                 pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop",
                              rsc->id, fail2text(*on_fail));
                 break;
 
             case action_fail_block:
             case action_fail_ignore:
             case action_fail_demote:
             case action_fail_recover:
             case action_fail_restart_container:
                 *on_fail = action_fail_ignore;
                 rsc->next_role = RSC_ROLE_UNKNOWN;
                 break;
             case action_fail_reset_remote:
                 if (rsc->remote_reconnect_ms == 0) {
                     /* With no reconnect interval, the connection is allowed to
                      * start again after the remote node is fenced and
                      * completely stopped. (With a reconnect interval, we wait
                      * for the failure to be cleared entirely before attempting
                      * to reconnect.)
                      */
                     *on_fail = action_fail_ignore;
                     rsc->next_role = RSC_ROLE_UNKNOWN;
                 }
                 break;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Remap informational monitor results to usual values
  *
  * Certain OCF result codes are for providing extended information to the
  * user about services that aren't failed yet but aren't entirely healthy
  * either. Pacemaker must treat these as the equivalent "normal" result.
  *
  * \param[in] rc        Actual result of a monitor action
  * \param[in] xml_op    Operation history XML
  * \param[in] node      Node that operation happened on
  * \param[in] rsc       Resource that operation happened to
  * \param[in] data_set  Cluster working set
  *
  * \return Result code that pacemaker should use
  *
  * \note If the result is remapped, and the node is not shutting down or failed,
  *       the operation will be recorded in the data set's list of failed
  *       operations, to highlight it for the user.
  */
 static int
 remap_monitor_rc(int rc, xmlNode *xml_op, const pe_node_t *node,
                  const pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     int remapped_rc = pcmk__effective_rc(rc);
 
     if (rc != remapped_rc) {
         crm_trace("Remapping monitor result %d to %d", rc, remapped_rc);
         if (!node->details->shutdown || node->details->online) {
             record_failed_op(xml_op, node, rsc, data_set);
         }
     }
     return remapped_rc;
 }
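 
 /* Editorial sketch (not part of this patch): the remapping this relies on,
  * assuming pcmk__effective_rc() maps the "degraded" codes to their healthy
  * equivalents, as its use here implies:
  */
 #if 0
 static void
 remap_monitor_rc_examples(void)
 {
     CRM_ASSERT(pcmk__effective_rc(PCMK_OCF_DEGRADED) == PCMK_OCF_OK);
     CRM_ASSERT(pcmk__effective_rc(PCMK_OCF_DEGRADED_MASTER)
                == PCMK_OCF_RUNNING_MASTER);
     /* All other codes pass through unchanged */
     CRM_ASSERT(pcmk__effective_rc(PCMK_OCF_NOT_RUNNING)
                == PCMK_OCF_NOT_RUNNING);
 }
 #endif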
 
 static void
 unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
               xmlNode **last_failure, enum action_fail_response *on_fail,
               pe_working_set_t *data_set)
 {
     int rc = 0;
     int task_id = 0;
     int target_rc = 0;
     int status = PCMK_LRM_OP_UNKNOWN;
     guint interval_ms = 0;
     const char *task = NULL;
     const char *task_key = NULL;
     const char *exit_reason = NULL;
     bool expired = FALSE;
     pe_resource_t *parent = rsc;
     enum action_fail_response failure_strategy = action_fail_recover;
 
     CRM_CHECK(rsc && node && xml_op, return);
 
     target_rc = pe__target_rc_from_xml(xml_op);
     task_key = get_op_key(xml_op);
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
     if (exit_reason == NULL) {
         exit_reason = "";
     }
 
     crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc);
     crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);
     crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status);
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
     CRM_CHECK(task != NULL, return);
     CRM_CHECK(status <= PCMK_LRM_OP_INVALID, return);
     CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return);
 
     if (!strcmp(task, CRMD_ACTION_NOTIFY) ||
         !strcmp(task, CRMD_ACTION_METADATA)) {
         /* safe to ignore these */
         return;
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         parent = uber_parent(rsc);
     }
 
     pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)",
                  task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role));
 
     if (node->details->unclean) {
         pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean."
                      " Further action depends on the value of the stop's on-fail attribute",
                      node->details->uname, rsc->id);
     }
 
     /* It should be possible to call remap_monitor_rc() first then call
      * check_operation_expiry() only if rc != target_rc, because there should
      * never be a fail count without at least one unexpected result in the
      * resource history. That would be more efficient by avoiding having to call
      * check_operation_expiry() for expected results.
      *
      * However, we do have such configurations in the scheduler regression
      * tests, even if it shouldn't be possible with the current code. It's
      * probably a good idea anyway, but that would require updating the test
      * inputs to something currently possible.
      */
 
     if ((status != PCMK_LRM_OP_NOT_INSTALLED)
         && check_operation_expiry(rsc, node, rc, xml_op, data_set)) {
         expired = TRUE;
     }
 
     if (!strcmp(task, CRMD_ACTION_STATUS)) {
         rc = remap_monitor_rc(rc, xml_op, node, rsc, data_set);
     }
 
     if (expired && (rc != target_rc)) {
         const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);
 
         if (interval_ms == 0) {
             crm_notice("Ignoring expired %s failure on %s "
                        CRM_XS " actual=%d expected=%d magic=%s",
                        task_key, node->details->uname, rc, target_rc, magic);
             goto done;
 
         } else if(node->details->online && node->details->unclean == FALSE) {
             /* Reschedule the recurring monitor. CancelXmlOp() won't work at
              * this stage, so as a hacky workaround, forcibly change the restart
              * digest so check_action_definition() does what we want later.
              *
              * @TODO We should skip this if there is a newer successful monitor.
              *       Also, this causes rescheduling only if the history entry
              *       has an op-digest (which the expire-non-blocked-failure
              *       scheduler regression test doesn't, but that may not be a
              *       realistic scenario in production).
              */
             crm_notice("Rescheduling %s after failure expired on %s "
                        CRM_XS " actual=%d expected=%d magic=%s",
                        task_key, node->details->uname, rc, target_rc, magic);
             crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout");
             goto done;
         }
     }
 
     /* If the executor reported an operation status of anything but done or
      * error, consider that final. But for done or error, we know better whether
      * it should be treated as a failure or not, because we know the expected
      * result.
      */
     if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) {
         status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set);
         pe_rsc_trace(rsc, "Remapped %s status to %d", task_key, status);
     }
 
     switch (status) {
         case PCMK_LRM_OP_CANCELLED:
             // Should never happen
             pe_err("Resource history contains cancellation '%s' "
                    "(%s of %s on %s at %s)",
                    ID(xml_op), task, rsc->id, node->details->uname,
                    last_change_str(xml_op));
             break;
 
         case PCMK_LRM_OP_PENDING:
             if (!strcmp(task, CRMD_ACTION_START)) {
                 pe__set_resource_flags(rsc, pe_rsc_start_pending);
                 set_active(rsc);
 
             } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
                 rsc->role = RSC_ROLE_MASTER;
 
             } else if (!strcmp(task, CRMD_ACTION_MIGRATE) && node->details->unclean) {
                 /* If a pending migrate_to action is out on an unclean node,
                  * we have to force the stop action on the target. */
                 const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
                 pe_node_t *target = pe_find_node(data_set->nodes, migrate_target);
                 if (target) {
                     stop_action(rsc, target, FALSE);
                 }
             }
 
             if (rsc->pending_task == NULL) {
                 if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) {
                     rsc->pending_task = strdup(task);
                     rsc->pending_node = node;
                 } else {
                     /* Pending probes are not printed, even if pending
                      * operations are requested. If someone ever requests that
                      * behavior, enable the below and the corresponding part of
                      * native.c:native_pending_task().
                      */
 #if 0
                     rsc->pending_task = strdup("probe");
                     rsc->pending_node = node;
 #endif
                 }
             }
             break;
 
         case PCMK_LRM_OP_DONE:
             pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s",
                          task, rsc->id, node->details->uname,
                          last_change_str(xml_op), ID(xml_op));
             update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set);
             break;
 
         case PCMK_LRM_OP_NOT_INSTALLED:
             failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
             if (failure_strategy == action_fail_ignore) {
                 crm_warn("Cannot ignore failed %s of %s on %s: "
                          "Resource agent doesn't exist "
                          CRM_XS " status=%d rc=%d id=%s",
                          task, rsc->id, node->details->uname, status, rc,
                          ID(xml_op));
                 /* Also mark it as pe_rsc_failed later so it is printed as "FAILED" */
                 *on_fail = action_fail_migrate;
             }
             resource_location(parent, node, -INFINITY, "hard-error", data_set);
             unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
             break;
 
         case PCMK_LRM_OP_NOT_CONNECTED:
             if (pe__is_guest_or_remote_node(node)
                 && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
                 /* We should never get into a situation where a managed remote
                  * connection resource is considered OK but a resource action
                  * behind the connection gets a "not connected" status. But as a
                  * fail-safe in case a bug or unusual circumstances do lead to
                  * that, ensure the remote connection is considered failed.
                  */
                 pe__set_resource_flags(node->details->remote_rsc,
                                        pe_rsc_failed|pe_rsc_stop);
             }
 
             // fall through
 
         case PCMK_LRM_OP_ERROR:
         case PCMK_LRM_OP_ERROR_HARD:
         case PCMK_LRM_OP_ERROR_FATAL:
         case PCMK_LRM_OP_TIMEOUT:
         case PCMK_LRM_OP_NOTSUPPORTED:
         case PCMK_LRM_OP_INVALID:
 
             failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
             if ((failure_strategy == action_fail_ignore)
                 || (failure_strategy == action_fail_restart_container
                     && !strcmp(task, CRMD_ACTION_STOP))) {
 
                 crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s "
                          "succeeded " CRM_XS " rc=%d id=%s",
                          task, services_ocf_exitcode_str(rc),
                          (*exit_reason? ": " : ""), exit_reason, rsc->id,
                          node->details->uname, last_change_str(xml_op), rc,
                          ID(xml_op));
 
                 update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set);
                 crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
                 pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
 
                 record_failed_op(xml_op, node, rsc, data_set);
 
                 if ((failure_strategy == action_fail_restart_container)
                     && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
                     *on_fail = failure_strategy;
                 }
 
             } else {
                 unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
 
                 if(status == PCMK_LRM_OP_ERROR_HARD) {
                     do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE,
                                "Preventing %s from restarting on %s because "
                                "of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s",
                                parent->id, node->details->uname,
                                services_ocf_exitcode_str(rc),
                                (*exit_reason? ": " : ""), exit_reason,
                                rc, ID(xml_op));
                     resource_location(parent, node, -INFINITY, "hard-error", data_set);
 
                 } else if(status == PCMK_LRM_OP_ERROR_FATAL) {
                     crm_err("Preventing %s from restarting anywhere because "
                             "of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s",
                             parent->id, services_ocf_exitcode_str(rc),
                             (*exit_reason? ": " : ""), exit_reason,
                             rc, ID(xml_op));
                     resource_location(parent, NULL, -INFINITY, "fatal-error", data_set);
                 }
             }
             break;
     }
 
   done:
     pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
                  rsc->id, task, role2text(rsc->role),
                  role2text(rsc->next_role));
 }
 
 static void
 add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite,
                pe_working_set_t *data_set)
 {
     const char *cluster_name = NULL;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     g_hash_table_insert(node->details->attrs,
                         strdup(CRM_ATTR_UNAME), strdup(node->details->uname));
 
     g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
                         strdup(node->details->id));
     if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
         data_set->dc_node = node;
         node->details->is_dc = TRUE;
         g_hash_table_insert(node->details->attrs,
                             strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
     } else {
         g_hash_table_insert(node->details->attrs,
                             strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
     }
 
     cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
     if (cluster_name) {
         g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
                             strdup(cluster_name));
     }
 
     pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
                                node->details->attrs, NULL, overwrite, data_set);
 
     if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
         const char *site_name = pe_node_attribute_raw(node, "site-name");
 
         if (site_name) {
             g_hash_table_insert(node->details->attrs,
                                 strdup(CRM_ATTR_SITE_NAME),
                                 strdup(site_name));
 
         } else if (cluster_name) {
             /* Default to cluster-name if unset */
             g_hash_table_insert(node->details->attrs,
                                 strdup(CRM_ATTR_SITE_NAME),
                                 strdup(cluster_name));
         }
     }
 }
 
 static GListPtr
 extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
 {
     int counter = -1;
     int stop_index = -1;
     int start_index = -1;
 
     xmlNode *rsc_op = NULL;
 
     GListPtr gIter = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
 
     /* extract operations */
     op_list = NULL;
     sorted_op_list = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry);
          rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
                          pcmk__str_none)) {
             crm_xml_add(rsc_op, "resource", rsc);
             crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     if (op_list == NULL) {
         /* if there are no operations, there is nothing to do */
         return NULL;
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
 
     /* create active recurring operations as optional */
     if (active_filter == FALSE) {
         return sorted_op_list;
     }
 
     op_list = NULL;
 
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         counter++;
 
         if (start_index < stop_index) {
             crm_trace("Skipping %s: not active", ID(rsc_entry));
             break;
 
         } else if (counter < start_index) {
             crm_trace("Skipping %s: old", ID(rsc_op));
             continue;
         }
         op_list = g_list_append(op_list, rsc_op);
     }
 
     g_list_free(sorted_op_list);
     return op_list;
 }
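 
 /* Editorial aside (illustrative, not part of this patch): with
  * active_filter, only operations from the most recent start onward are
  * kept. For a sorted history (call IDs in parentheses)
  *
  *   start(3), monitor(4), stop(6), start(7), monitor(8)
  *
  * calculate_active_ops() reports start(7) as the start index, so the
  * returned list is [start(7), monitor(8)]; if instead a stop followed the
  * last start (start_index < stop_index), nothing would be returned.
  */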
 
 GListPtr
 find_operations(const char *rsc, const char *node, gboolean active_filter,
                 pe_working_set_t * data_set)
 {
     GListPtr output = NULL;
     GListPtr intermediate = NULL;
 
     xmlNode *tmp = NULL;
     xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
 
     pe_node_t *this_node = NULL;
 
     xmlNode *node_state = NULL;
 
     for (node_state = pcmk__xe_first_child(status); node_state != NULL;
          node_state = pcmk__xe_next(node_state)) {
 
         if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
             const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);
 
             if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
                 continue;
             }
 
             this_node = pe_find_node(data_set->nodes, uname);
             if(this_node == NULL) {
                 CRM_LOG_ASSERT(this_node != NULL);
                 continue;
 
             } else if (pe__is_guest_or_remote_node(this_node)) {
                 determine_remote_online_status(data_set, this_node);
 
             } else {
                 determine_online_status(node_state, this_node, data_set);
             }
 
             if (this_node->details->online
                 || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 /* Offline nodes run no resources...
                  * unless stonith is enabled, in which case we need to make
                  * sure resource start events happen after the fencing
                  */
                 xmlNode *lrm_rsc = NULL;
 
                 tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
                 tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
 
                 for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL;
                      lrm_rsc = pcmk__xe_next(lrm_rsc)) {
 
                     if (pcmk__str_eq((const char *)lrm_rsc->name,
                                      XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
 
                         const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);
 
                         if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
                             continue;
                         }
 
                         intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
                         output = g_list_concat(output, intermediate);
                     }
                 }
             }
         }
     }
 
     return output;
 }
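 
 /* A minimal, hypothetical sketch (not part of this patch) of how a caller
  * might walk the history returned by find_operations(); log_all_history and
  * its variable names are placeholders.
  */
 #if 0
 static void
 log_all_history(pe_working_set_t *data_set)
 {
     // NULL rsc/node means "all"; FALSE includes inactive operations too
     GListPtr ops = find_operations(NULL, NULL, FALSE, data_set);
 
     for (GListPtr iter = ops; iter != NULL; iter = iter->next) {
         xmlNode *rsc_op = (xmlNode *) iter->data;
 
         // extract_operations() tagged each entry with these attributes
         crm_trace("%s ran %s on %s",
                   crm_element_value(rsc_op, "resource"), ID(rsc_op),
                   crm_element_value(rsc_op, XML_ATTR_UNAME));
     }
     g_list_free(ops); // frees list cells only; the XML belongs to the CIB
 }
 #endif
 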
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index b07afbe45d..831f890571 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2511 +1,2511 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/util.h>
 
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include "pe_status_private.h"
 
 extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
 void print_str_str(gpointer key, gpointer value, gpointer user_data);
 gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
 static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
                              pe_working_set_t * data_set, guint interval_ms);
 static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
                                          gboolean include_disabled);
 
 #if ENABLE_VERSIONED_ATTRS
 pe_rsc_action_details_t *
 pe_rsc_action_details(pe_action_t *action)
 {
     pe_rsc_action_details_t *details;
 
     CRM_CHECK(action != NULL, return NULL);
 
     if (action->action_details == NULL) {
         action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
         CRM_CHECK(action->action_details != NULL, return NULL);
     }
 
     details = (pe_rsc_action_details_t *) action->action_details;
     if (details->versioned_parameters == NULL) {
         details->versioned_parameters = create_xml_node(NULL,
                                                         XML_TAG_OP_VER_ATTRS);
     }
     if (details->versioned_meta == NULL) {
         details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
     }
     return details;
 }
 
 static void
 pe_free_rsc_action_details(pe_action_t *action)
 {
     pe_rsc_action_details_t *details;
 
     if ((action == NULL) || (action->action_details == NULL)) {
         return;
     }
 
     details = (pe_rsc_action_details_t *) action->action_details;
 
     if (details->versioned_parameters) {
         free_xml(details->versioned_parameters);
     }
     if (details->versioned_meta) {
         free_xml(details->versioned_meta);
     }
 
     action->action_details = NULL;
 }
 #endif
 
 /*!
  * \internal
  * \brief Check whether we can fence a particular node
  *
  * \param[in] data_set  Working set for cluster
  * \param[in] node      Name of node to check
  *
  * \return true if node can be fenced, false otherwise
  */
 bool
 pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
 {
     if (pe__is_guest_node(node)) {
         /* Guest nodes are fenced by stopping their container resource. We can
          * do that if the container's host is either online or fenceable.
          */
         pe_resource_t *rsc = node->details->remote_rsc->container;
 
         for (GList *n = rsc->running_on; n != NULL; n = n->next) {
             pe_node_t *container_node = n->data;
 
             if (!container_node->details->online
                 && !pe_can_fence(data_set, container_node)) {
                 return false;
             }
         }
         return true;
 
     } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
         return false; /* Turned off */
 
     } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
         return false; /* No devices */
 
     } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
         return true;
 
     } else if (data_set->no_quorum_policy == no_quorum_ignore) {
         return true;
 
     } else if (node == NULL) {
         return false;
 
     } else if (node->details->online) {
         crm_notice("We can fence %s without quorum because it's in our membership",
                    node->details->uname);
         return true;
     }
 
     crm_trace("Cannot fence %s", node->details->uname);
     return false;
 }
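 
 /* Hypothetical illustration (not part of this patch) of the usual guard
  * around scheduling fencing, per the checks above.
  */
 #if 0
 if (node->details->unclean && pe_can_fence(data_set, node)) {
     // safe to schedule a fencing action for this node
 }
 #endif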
 
 /*!
  * \internal
  * \brief Copy a node object
  *
  * \param[in] this_node  Node object to copy
  *
  * \return Newly allocated shallow copy of this_node
  * \note This function asserts on errors and is guaranteed to return non-NULL.
  */
 pe_node_t *
 pe__copy_node(const pe_node_t *this_node)
 {
     pe_node_t *new_node = NULL;
 
     CRM_ASSERT(this_node != NULL);
 
     new_node = calloc(1, sizeof(pe_node_t));
     CRM_ASSERT(new_node != NULL);
 
     new_node->rsc_discover_mode = this_node->rsc_discover_mode;
     new_node->weight = this_node->weight;
     new_node->fixed = this_node->fixed;
     new_node->details = this_node->details;
 
     return new_node;
 }
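 
 /* Note: the copy above is shallow, so new_node->details is shared with the
  * original; callers free() only the wrapper (as pe_free_action() does later
  * in this file) and must never free the shared details.
  */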
 
 /* any node in list1 or list2 and not in the other gets a score of -INFINITY */
 void
 node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
 {
     GHashTable *result = hash;
     pe_node_t *other_node = NULL;
     GListPtr gIter = list;
 
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
 
         other_node = pe_find_node_id(list, node->details->id);
         if (other_node == NULL) {
             node->weight = -INFINITY;
         } else if (merge_scores) {
             node->weight = pe__add_scores(node->weight, other_node->weight);
         }
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         other_node = pe_hash_table_lookup(result, node->details->id);
 
         if (other_node == NULL) {
             pe_node_t *new_node = pe__copy_node(node);
 
             new_node->weight = -INFINITY;
             g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create a node hash table from a node list
  *
  * \param[in] list  Node list
  *
  * \return Hash table equivalent of node list
  */
 GHashTable *
 pe__node_list2table(GList *list)
 {
     GHashTable *result = NULL;
 
     result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data);
 
         g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
     }
     return result;
 }
 
 gint
 sort_node_uname(gconstpointer a, gconstpointer b)
 {
     return pcmk_numeric_strcasecmp(((const pe_node_t *) a)->details->uname,
                                    ((const pe_node_t *) b)->details->uname);
 }
 
 /*!
  * \internal
  * \brief Output node weights to stdout
  *
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 static void
 pe__output_node_weights(pe_resource_t *rsc, const char *comment,
                         GHashTable *nodes)
 {
     char score[128]; // Stack-allocated since this is called frequently
 
     // Sort the nodes so the output is consistent for regression tests
     GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname);
 
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         score2char_stack(node->weight, score, sizeof(score));
         if (rsc) {
             printf("%s: %s allocation score on %s: %s\n",
                    comment, rsc->id, node->details->uname, score);
         } else {
             printf("%s: %s = %s\n", comment, node->details->uname, score);
         }
     }
     g_list_free(list);
 }
 
 /*!
  * \internal
  * \brief Log node weights at trace level
  *
  * \param[in] file      Caller's filename
  * \param[in] function  Caller's function name
  * \param[in] line      Caller's line number
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 static void
 pe__log_node_weights(const char *file, const char *function, int line,
                      pe_resource_t *rsc, const char *comment, GHashTable *nodes)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     char score[128]; // Stack-allocated since this is called frequently
 
     // Don't waste time if we're not tracing at this point
     pcmk__log_else(LOG_TRACE, return);
 
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         score2char_stack(node->weight, score, sizeof(score));
         if (rsc) {
             qb_log_from_external_source(function, file,
                                         "%s: %s allocation score on %s: %s",
                                         LOG_TRACE, line, 0,
                                         comment, rsc->id,
                                         node->details->uname, score);
         } else {
             qb_log_from_external_source(function, file, "%s: %s = %s",
                                         LOG_TRACE, line, 0,
                                         comment, node->details->uname,
                                         score);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Log or output node weights
  *
  * \param[in] file      Caller's filename
  * \param[in] function  Caller's function name
  * \param[in] line      Caller's line number
  * \param[in] to_log    Log if true, otherwise output
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 void
 pe__show_node_weights_as(const char *file, const char *function, int line,
                          bool to_log, pe_resource_t *rsc, const char *comment,
                          GHashTable *nodes)
 {
     if (rsc != NULL) {
         if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             // Don't show allocation scores for orphans
             return;
         }
         nodes = rsc->allowed_nodes;
     }
     if (nodes == NULL) {
         // Nothing to show
         return;
     }
 
     if (to_log) {
         pe__log_node_weights(file, function, line, rsc, comment, nodes);
     } else {
         pe__output_node_weights(rsc, comment, nodes);
     }
 
     // If this resource has children, repeat recursively for each
     if (rsc && rsc->children) {
         for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             pe__show_node_weights_as(file, function, line, to_log, child,
                                      comment, nodes);
         }
     }
 }
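 
 /* A minimal sketch (not part of this patch) of a direct call; the explicit
  * form below relies only on what is declared here.
  */
 #if 0
 pe__show_node_weights_as(__FILE__, __func__, __LINE__,
                          true,  // log at trace level instead of stdout
                          rsc, "Pre-allocation", NULL);
 #endif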
 
 static void
 append_dump_text(gpointer key, gpointer value, gpointer user_data)
 {
     char **dump_text = user_data;
     char *new_text = crm_strdup_printf("%s %s=%s",
                                        *dump_text, (char *)key, (char *)value);
 
     free(*dump_text);
     *dump_text = new_text;
 }
 
 void
 dump_node_capacity(int level, const char *comment, pe_node_t * node)
 {
     char *dump_text = crm_strdup_printf("%s: %s capacity:",
                                         comment, node->details->uname);
 
     g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
 
     if (level == LOG_STDOUT) {
         fprintf(stdout, "%s\n", dump_text);
     } else {
         crm_trace("%s", dump_text);
     }
 
     free(dump_text);
 }
 
 void
 dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node)
 {
     char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
                                         comment, rsc->id, node->details->uname);
 
     g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
     switch (level) {
         case LOG_STDOUT:
             fprintf(stdout, "%s\n", dump_text);
             break;
         case LOG_NEVER:
             break;
         default:
             crm_trace("%s", dump_text);
     }
     free(dump_text);
 }
 
 gint
 sort_rsc_index(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *resource1 = (const pe_resource_t *)a;
     const pe_resource_t *resource2 = (const pe_resource_t *)b;
 
     if (a == NULL && b == NULL) {
         return 0;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (resource1->sort_index > resource2->sort_index) {
         return -1;
     }
 
     if (resource1->sort_index < resource2->sort_index) {
         return 1;
     }
 
     return 0;
 }
 
 gint
 sort_rsc_priority(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *resource1 = (const pe_resource_t *)a;
     const pe_resource_t *resource2 = (const pe_resource_t *)b;
 
     if (a == NULL && b == NULL) {
         return 0;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (resource1->priority > resource2->priority) {
         return -1;
     }
 
     if (resource1->priority < resource2->priority) {
         return 1;
     }
 
     return 0;
 }
 
 static enum pe_quorum_policy
 effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     enum pe_quorum_policy policy = data_set->no_quorum_policy;
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
         policy = no_quorum_ignore;
 
     } else if (data_set->no_quorum_policy == no_quorum_demote) {
         switch (rsc->role) {
             case RSC_ROLE_MASTER:
             case RSC_ROLE_SLAVE:
                 if (rsc->next_role > RSC_ROLE_SLAVE) {
                     rsc->next_role = RSC_ROLE_SLAVE;
                 }
                 policy = no_quorum_ignore;
                 break;
             default:
                 policy = no_quorum_stop;
                 break;
         }
     }
     return policy;
 }
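 
 /* In effect: with no-quorum-policy=demote, a promoted (or already demoted)
  * instance may keep running, demoted at most to slave, while every other
  * resource is handled as if the policy were "stop"; having quorum makes any
  * configured policy behave like "ignore".
  */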
 
 pe_action_t *
 custom_action(pe_resource_t * rsc, char *key, const char *task,
               pe_node_t * on_node, gboolean optional, gboolean save_action,
               pe_working_set_t * data_set)
 {
     pe_action_t *action = NULL;
     GListPtr possible_matches = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
     CRM_CHECK(task != NULL, free(key); return NULL);
 
     if (save_action && rsc != NULL) {
         possible_matches = find_actions(rsc->actions, key, on_node);
     } else if(save_action) {
 #if 0
         action = g_hash_table_lookup(data_set->singletons, key);
 #else
         /* More expensive but takes 'node' into account */
         possible_matches = find_actions(data_set->actions, key, on_node);
 #endif
     }
 
     if(data_set->singletons == NULL) {
         data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
     }
 
     if (possible_matches != NULL) {
         if (pcmk__list_of_multiple(possible_matches)) {
             pe_warn("Action %s for %s on %s exists %d times",
                     task, rsc ? rsc->id : "<NULL>",
                     on_node ? on_node->details->uname : "<NULL>", g_list_length(possible_matches));
         }
 
         action = g_list_nth_data(possible_matches, 0);
         pe_rsc_trace(rsc, "Found action %d: %s for %s (%s) on %s",
                      action->id, task, (rsc? rsc->id : "no resource"),
                      action->uuid,
                      (on_node? on_node->details->uname : "no node"));
         g_list_free(possible_matches);
     }
 
     if (action == NULL) {
         if (save_action) {
             pe_rsc_trace(rsc, "Creating action %d (%s): %s for %s (%s) on %s",
                          data_set->action_id,
                          (optional? "optional" : "required"),
                          task, (rsc? rsc->id : "no resource"), key,
                          (on_node? on_node->details->uname : "no node"));
         }
 
         action = calloc(1, sizeof(pe_action_t));
         if (save_action) {
             action->id = data_set->action_id++;
         } else {
             action->id = 0;
         }
         action->rsc = rsc;
         action->task = strdup(task);
         if (on_node) {
             action->node = pe__copy_node(on_node);
         }
         action->uuid = strdup(key);
 
         if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
             // Resource history deletion for a node can be done on the DC
             pe__set_action_flags(action, pe_action_dc);
         }
 
         pe__set_action_flags(action, pe_action_runnable);
         if (optional) {
             pe__set_action_flags(action, pe_action_optional);
         } else {
             pe__clear_action_flags(action, pe_action_optional);
         }
 
         action->extra = crm_str_table_new();
         action->meta = crm_str_table_new();
 
         if (save_action) {
             data_set->actions = g_list_prepend(data_set->actions, action);
             if(rsc == NULL) {
                 g_hash_table_insert(data_set->singletons, action->uuid, action);
             }
         }
 
         if (rsc != NULL) {
             guint interval_ms = 0;
 
             action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
             parse_op_key(key, NULL, NULL, &interval_ms);
 
             unpack_operation(action, action->op_entry, rsc->container, data_set,
                              interval_ms);
 
             if (save_action) {
                 rsc->actions = g_list_prepend(rsc->actions, action);
             }
         }
     }
 
     if (!optional && pcmk_is_set(action->flags, pe_action_optional)) {
         pe__clear_action_flags(action, pe_action_optional);
     }
 
     if (rsc != NULL) {
         enum action_tasks a_task = text2task(action->task);
         enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set);
         int warn_level = LOG_TRACE;
 
         if (save_action) {
             warn_level = LOG_WARNING;
         }
 
         if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
             && action->node != NULL && action->op_entry != NULL) {
             pe_rule_eval_data_t rule_data = {
                 .node_hash = action->node->details->attrs,
                 .role = RSC_ROLE_UNKNOWN,
                 .now = data_set->now,
                 .match_data = NULL,
                 .rsc_data = NULL,
                 .op_data = NULL
             };
 
             pe__set_action_flags(action, pe_action_have_node_attrs);
             pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
                                        &rule_data, action->extra, NULL,
                                        FALSE, data_set);
         }
 
         if (pcmk_is_set(action->flags, pe_action_pseudo)) {
             /* leave untouched */
 
         } else if (action->node == NULL) {
             pe_rsc_trace(rsc, "%s is unrunnable (unallocated)",
                          action->uuid);
             pe__clear_action_flags(action, pe_action_runnable);
 
         } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
                    && g_hash_table_lookup(action->meta,
                                           XML_LRM_ATTR_INTERVAL_MS) == NULL) {
             pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
                          action->uuid, action->node->details->uname, rsc->id);
             pe__set_action_flags(action, pe_action_optional);
             //pe__clear_action_flags(action, pe_action_runnable);
 
         } else if (!pcmk_is_set(action->flags, pe_action_dc)
                    && !(action->node->details->online)
                    && (!pe__is_guest_node(action->node)
                        || action->node->details->remote_requires_reset)) {
             pe__clear_action_flags(action, pe_action_runnable);
             do_crm_log(warn_level,
                        "%s on %s is unrunnable (node is offline)",
                        action->uuid, action->node->details->uname);
             if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
                 && save_action && a_task == stop_rsc
                 && action->node->details->unclean == FALSE) {
                 pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE);
             }
 
         } else if (!pcmk_is_set(action->flags, pe_action_dc)
                    && action->node->details->pending) {
             pe__clear_action_flags(action, pe_action_runnable);
             do_crm_log(warn_level,
                        "Action %s on %s is unrunnable (node is pending)",
                        action->uuid, action->node->details->uname);
 
         } else if (action->needs == rsc_req_nothing) {
             pe_action_set_reason(action, NULL, TRUE);
             if (pe__is_guest_node(action->node)
                 && !pe_can_fence(data_set, action->node)) {
                 /* An action that requires nothing usually does not require any
                  * fencing in order to be runnable. However, there is an
                  * exception: an action cannot be completed if it is on a guest
                  * node whose host is unclean and cannot be fenced.
                  */
                 pe_rsc_debug(rsc, "%s on %s is unrunnable "
                              "(node's host cannot be fenced)",
                              action->uuid, action->node->details->uname);
                 pe__clear_action_flags(action, pe_action_runnable);
             } else {
                 pe_rsc_trace(rsc, "%s on %s does not require fencing or quorum",
                              action->uuid, action->node->details->uname);
                 pe__set_action_flags(action, pe_action_runnable);
             }
 #if 0
             /*
              * No point checking this
              * - if we don't have quorum we can't stonith anyway
              */
         } else if (action->needs == rsc_req_stonith) {
             crm_trace("Action %s requires only stonith", action->uuid);
             action->runnable = TRUE;
 #endif
         } else if (quorum_policy == no_quorum_stop) {
             pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
                          action->uuid, action->node->details->uname);
             pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
                                       "no quorum", pe_action_runnable, TRUE);
 
         } else if (quorum_policy == no_quorum_freeze) {
             if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
                 pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
                              action->uuid, action->node->details->uname);
                 pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
                                           "quorum freeze", pe_action_runnable,
                                           TRUE);
             }
 
         } else {
             //pe_action_set_reason(action, NULL, TRUE);
             pe__set_action_flags(action, pe_action_runnable);
         }
 
         if (save_action) {
             switch (a_task) {
                 case stop_rsc:
                     pe__set_resource_flags(rsc, pe_rsc_stopping);
                     break;
                 case start_rsc:
                     pe__clear_resource_flags(rsc, pe_rsc_starting);
                     if (pcmk_is_set(action->flags, pe_action_runnable)) {
                         pe__set_resource_flags(rsc, pe_rsc_starting);
                     }
                     break;
                 default:
                     break;
             }
         }
     }
 
     free(key);
     return action;
 }
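 
 /* Hypothetical sketch (not part of this patch) of how a caller typically
  * requests an action; custom_action() takes ownership of the key and
  * frees it.
  */
 #if 0
 pe_action_t *stop = custom_action(rsc,
                                   pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0),
                                   CRMD_ACTION_STOP, node,
                                   FALSE,  // required, not optional
                                   TRUE,   // save in rsc->actions
                                   data_set);
 #endif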
 
 static bool
 valid_stop_on_fail(const char *value)
 {
     return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
 }
 
 static const char *
 unpack_operation_on_fail(pe_action_t * action)
 {
 
     const char *name = NULL;
     const char *role = NULL;
     const char *on_fail = NULL;
     const char *interval_spec = NULL;
     const char *enabled = NULL;
     const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
 
     if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
         && !valid_stop_on_fail(value)) {
 
         pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
                          "action to default value because '%s' is not "
                          "allowed for stop", action->rsc->id, value);
         return NULL;
 
     } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
         /* demote on_fail defaults to master monitor value if present */
         xmlNode *operation = NULL;
 
         CRM_CHECK(action->rsc != NULL, return NULL);
 
         for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
              (operation != NULL) && (value == NULL);
              operation = pcmk__xe_next(operation)) {
 
             if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                 continue;
             }
             name = crm_element_value(operation, "name");
             role = crm_element_value(operation, "role");
             on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
             enabled = crm_element_value(operation, "enabled");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             if (!on_fail) {
                 continue;
             } else if (enabled && !crm_is_true(enabled)) {
                 continue;
             } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei) || !pcmk__str_eq(role, "Master", pcmk__str_casei)) {
                 continue;
             } else if (crm_parse_interval_spec(interval_spec) == 0) {
                 continue;
             } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
                 continue;
             }
 
             value = on_fail;
         }
     } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
         value = "ignore";
 
     } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
         name = crm_element_value(action->op_entry, "name");
         role = crm_element_value(action->op_entry, "role");
         interval_spec = crm_element_value(action->op_entry,
                                           XML_LRM_ATTR_INTERVAL);
 
         if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
             && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
                 || !pcmk__str_eq(role, "Master", pcmk__str_casei)
                 || (crm_parse_interval_spec(interval_spec) == 0))) {
             pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
                              "action to default value because 'demote' is not "
                              "allowed for it", action->rsc->id, name);
             return NULL;
         }
     }
 
     return value;
 }
 
 static xmlNode *
 find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
 {
     guint interval_ms = 0;
     guint min_interval_ms = G_MAXUINT;
     const char *name = NULL;
     const char *value = NULL;
     const char *interval_spec = NULL;
     xmlNode *op = NULL;
     xmlNode *operation = NULL;
 
     for (operation = pcmk__xe_first_child(rsc->ops_xml);
          operation != NULL;
          operation = pcmk__xe_next(operation)) {
 
         if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
             name = crm_element_value(operation, "name");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             value = crm_element_value(operation, "enabled");
             if (!include_disabled && value && crm_is_true(value) == FALSE) {
                 continue;
             }
 
             if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
                 continue;
             }
 
             interval_ms = crm_parse_interval_spec(interval_spec);
 
             if (interval_ms && (interval_ms < min_interval_ms)) {
                 min_interval_ms = interval_ms;
                 op = operation;
             }
         }
     }
 
     return op;
 }
 
 static int
 unpack_start_delay(const char *value, GHashTable *meta)
 {
     int start_delay = 0;
 
     if (value != NULL) {
         start_delay = crm_get_msec(value);
 
         if (start_delay < 0) {
             start_delay = 0;
         }
 
         if (meta) {
             g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay));
         }
     }
 
     return start_delay;
 }
 
 // true if value contains valid, non-NULL interval origin for recurring op
 static bool
 unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms,
                        crm_time_t *now, long long *start_delay)
 {
     long long result = 0;
     guint interval_sec = interval_ms / 1000;
     crm_time_t *origin = NULL;
 
     /* Ignore unspecified values and non-recurring operations. Intervals
      * under one second are also treated as non-recurring, since they would
      * truncate interval_sec to zero below and trigger a division by zero.
      */
     if ((value == NULL) || (interval_ms < 1000) || (now == NULL)) {
         return false;
     }
 
     // Parse interval origin from text
     origin = crm_time_new(value);
     if (origin == NULL) {
         pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
                          "'%s' because '%s' is not valid",
                          (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
         return false;
     }
 
     // Get seconds since origin (negative if origin is in the future)
     result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
     crm_time_free(origin);
 
     // Calculate seconds from closest interval to now
     result = result % interval_sec;
 
     // Calculate seconds remaining until next interval
     result = ((result <= 0)? 0 : interval_sec) - result;
     crm_info("Calculated a start delay of %llds for operation '%s'",
              result,
              (ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
 
     if (start_delay != NULL) {
         *start_delay = result * 1000; // milliseconds
     }
     return true;
 }
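 
 /* Worked example of the arithmetic above: with a 40s interval and an origin
  * 95s in the past, 95 % 40 = 15s into the current interval, so the remaining
  * delay is 40 - 15 = 25s (reported as 25000ms); landing exactly on an
  * interval boundary yields a delay of 0.
  */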
 
 static int
 unpack_timeout(const char *value)
 {
     int timeout_ms = crm_get_msec(value);
 
     if (timeout_ms < 0) {
         timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
     return timeout_ms;
 }
 
 int
 pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
 {
     xmlNode *child = NULL;
     GHashTable *action_meta = NULL;
     const char *timeout_spec = NULL;
     int timeout_ms = 0;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
          child != NULL; child = crm_next_same_xml(child)) {
         if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
                 pcmk__str_casei)) {
             timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
             break;
         }
     }
 
     if (timeout_spec == NULL && data_set->op_defaults) {
         action_meta = crm_str_table_new();
         pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
                                    &rule_data, action_meta, NULL, FALSE, data_set);
         timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
     }
 
     // @TODO check meta-attributes (including versioned meta-attributes)
     // @TODO maybe use min-interval monitor timeout as default for monitors
 
     timeout_ms = crm_get_msec(timeout_spec);
     if (timeout_ms < 0) {
         timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
 
     if (action_meta != NULL) {
         g_hash_table_destroy(action_meta);
     }
     return timeout_ms;
 }
 
 #if ENABLE_VERSIONED_ATTRS
 static void
 unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
                       guint interval_ms, crm_time_t *now)
 {
     xmlNode *attrs = NULL;
     xmlNode *attr = NULL;
 
     for (attrs = pcmk__xe_first_child(versioned_meta); attrs != NULL;
          attrs = pcmk__xe_next(attrs)) {
 
         for (attr = pcmk__xe_first_child(attrs); attr != NULL;
              attr = pcmk__xe_next(attr)) {
 
             const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
             const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
 
             if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) {
                 int start_delay = unpack_start_delay(value, NULL);
 
                 crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
             } else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) {
                 long long start_delay = 0;
 
                 if (unpack_interval_origin(value, xml_obj, interval_ms, now,
                                            &start_delay)) {
                     crm_xml_add(attr, XML_NVPAIR_ATTR_NAME,
                                 XML_OP_ATTR_START_DELAY);
                     crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
                 }
             } else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) {
                 int timeout_ms = unpack_timeout(value);
 
                 crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms);
             }
         }
     }
 }
 #endif
 
 /*!
  * \brief Unpack operation XML into an action structure
  *
  * Unpack an operation's meta-attributes (normalizing the interval, timeout,
  * and start delay values as integer milliseconds), requirements, and
  * failure policy.
  *
  * \param[in,out] action      Action to unpack into
  * \param[in]     xml_obj     Operation XML (or NULL if all defaults)
  * \param[in]     container   Resource that contains affected resource, if any
  * \param[in]     data_set    Cluster state
  * \param[in]     interval_ms How frequently to perform the operation
  */
 static void
 unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
                  pe_working_set_t * data_set, guint interval_ms)
 {
     int timeout_ms = 0;
     const char *value = NULL;
     bool is_probe = false;
 #if ENABLE_VERSIONED_ATTRS
     pe_rsc_action_details_t *rsc_details = NULL;
 #endif
 
     /* Validate inputs before the initializers below dereference them */
     CRM_CHECK((action != NULL) && (action->rsc != NULL), return);
 
     is_probe = pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei)
                && (interval_ms == 0);
 
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
         .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
         .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
     };
 
     pe_op_eval_data_t op_rule_data = {
         .op_name = action->task,
         .interval = interval_ms
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = RSC_ROLE_UNKNOWN,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = &op_rule_data
     };
 
     // Cluster-wide <op_defaults> <meta_attributes>
     pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
                                action->meta, NULL, FALSE, data_set);
 
     // Determine probe default timeout differently
     if (is_probe) {
         xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
 
         if (min_interval_mon) {
             value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
             if (value) {
                 crm_trace("\t%s: Setting default timeout to minimum-interval "
                           "monitor's timeout '%s'", action->uuid, value);
                 g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
                                      strdup(value));
             }
         }
     }
 
     if (xml_obj) {
         xmlAttrPtr xIter = NULL;
 
         // <op> <meta_attributes> take precedence over defaults
         pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
                                    action->meta, NULL, TRUE, data_set);
 
 #if ENABLE_VERSIONED_ATTRS
         rsc_details = pe_rsc_action_details(action);
 
         pe_eval_versioned_attributes(data_set->input, xml_obj,
                                      XML_TAG_ATTR_SETS, &rule_data,
                                      rsc_details->versioned_parameters,
                                      NULL);
         pe_eval_versioned_attributes(data_set->input, xml_obj,
                                      XML_TAG_META_SETS, &rule_data,
                                      rsc_details->versioned_meta,
                                      NULL);
 #endif
 
         /* Anything set as an <op> XML property has highest precedence.
          * This ensures we use the name and interval from the <op> tag.
          */
         for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
             const char *prop_name = (const char *)xIter->name;
             const char *prop_value = crm_element_value(xml_obj, prop_name);
 
             g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
         }
     }
 
     g_hash_table_remove(action->meta, "id");
 
     // Normalize interval to milliseconds
     if (interval_ms > 0) {
         g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
                              crm_strdup_printf("%u", interval_ms));
     } else {
         g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
     }
 
     /*
      * Timeout order of precedence:
      *   1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
      *      and task is start or a probe; pcmk_monitor_timeout works
      *      by default for a recurring monitor)
      *   2. explicit op timeout on the primitive
      *   3. default op timeout
      *      a. if probe, then min-interval monitor's timeout
      *      b. else, in XML_CIB_TAG_OPCONFIG
      *   4. CRM_DEFAULT_OP_TIMEOUT_S
      *
      * #1 overrides general rule of <op> XML property having highest
      * precedence.
      */
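     /* Illustrative (hypothetical) configuration for case #1 above, on a
      * fence device:
      *   <primitive id="st1" class="stonith" type="fence_xyz">
      *     <instance_attributes id="st1-ia">
      *       <nvpair id="st1-mt" name="pcmk_monitor_timeout" value="120s"/>
      *     </instance_attributes>
      *   </primitive>
      */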
     if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
                     pcmk_ra_cap_fence_params)
-            && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
-                || is_probe)
-            && action->rsc->parameters) {
+        && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
+            || is_probe)) {
 
-        value = g_hash_table_lookup(action->rsc->parameters,
-                                    "pcmk_monitor_timeout");
+        GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+
+        value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
 
         if (value) {
             crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
                       "overriding default", action->uuid, value);
             g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
                                  strdup(value));
         }
     }
 
     // Normalize timeout to positive milliseconds
     value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
     timeout_ms = unpack_timeout(value);
     g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
                          crm_itoa(timeout_ms));
 
     if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
         action->needs = rsc_req_nothing;
         value = "nothing (not start or promote)";
 
     } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
         action->needs = rsc_req_stonith;
         value = "fencing";
 
     } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
         action->needs = rsc_req_quorum;
         value = "quorum";
 
     } else {
         action->needs = rsc_req_nothing;
         value = "nothing";
     }
     pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
 
     value = unpack_operation_on_fail(action);
 
     if (value == NULL) {
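         // no explicit on-fail (or it was rejected); defaults are applied below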
 
     } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
         action->on_fail = action_fail_block;
         g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
         value = "block"; // The above could destroy the original string
 
     } else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
         action->on_fail = action_fail_fence;
         value = "node fencing";
 
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
                              "operation '%s' to 'stop' because 'fence' is not "
                              "valid when fencing is disabled", action->uuid);
             action->on_fail = action_fail_stop;
             action->fail_role = RSC_ROLE_STOPPED;
             value = "stop resource";
         }
 
     } else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
         action->on_fail = action_fail_standby;
         value = "node standby";
 
     } else if (pcmk__strcase_any_of(value, "ignore", "nothing", NULL)) {
         action->on_fail = action_fail_ignore;
         value = "ignore";
 
     } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
         action->on_fail = action_fail_migrate;
         value = "force migration";
 
     } else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
         action->on_fail = action_fail_stop;
         action->fail_role = RSC_ROLE_STOPPED;
         value = "stop resource";
 
     } else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
         action->on_fail = action_fail_recover;
         value = "restart (and possibly migrate)";
 
     } else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
         if (container) {
             action->on_fail = action_fail_restart_container;
             value = "restart container (and possibly migrate)";
 
         } else {
             value = NULL;
         }
 
     } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
         action->on_fail = action_fail_demote;
         value = "demote instance";
 
     } else {
         pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
         value = NULL;
     }
 
     /* defaults */
     if (value == NULL && container) {
         action->on_fail = action_fail_restart_container;
         value = "restart container (and possibly migrate) (default)";
 
     /* For remote nodes, ensure that any failure that results in dropping an
      * active connection to the node results in fencing of the node.
      *
      * There are only two action failures that don't result in fencing:
      * 1. probes - probe failures are expected.
      * 2. start - a start failure indicates that an active connection does
      *    not already exist. The user can set op on-fail=fence if they
      *    really want to fence start failures.
      */
     } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
                && pe__resource_is_remote_conn(action->rsc, data_set)
                && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
                     && (interval_ms == 0))
                && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
 
         if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
             action->on_fail = action_fail_stop;
             action->fail_role = RSC_ROLE_STOPPED;
             value = "stop unmanaged remote node (enforcing default)";
 
         } else {
             if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 value = "fence remote node (default)";
             } else {
                 value = "recover remote node connection (default)";
             }
 
             if (action->rsc->remote_reconnect_ms) {
                 action->fail_role = RSC_ROLE_STOPPED;
             }
             action->on_fail = action_fail_reset_remote;
         }
 
     } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             action->on_fail = action_fail_fence;
             value = "resource fence (default)";
 
         } else {
             action->on_fail = action_fail_block;
             value = "resource block (default)";
         }
 
     } else if (value == NULL) {
         action->on_fail = action_fail_recover;
         value = "restart (and possibly migrate) (default)";
     }
 
     pe_rsc_trace(action->rsc, "%s failure handling: %s",
                  action->uuid, value);
 
     value = NULL;
     if (xml_obj != NULL) {
         value = g_hash_table_lookup(action->meta, "role_after_failure");
         if (value) {
             pe_warn_once(pe_wo_role_after,
                          "Support for role_after_failure is deprecated and "
                          "will be removed in a future release");
         }
     }
     if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
         action->fail_role = text2role(value);
     }
     /* defaults */
     if (action->fail_role == RSC_ROLE_UNKNOWN) {
         if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
             action->fail_role = RSC_ROLE_SLAVE;
         } else {
             action->fail_role = RSC_ROLE_STARTED;
         }
     }
     pe_rsc_trace(action->rsc, "%s failure results in: %s",
                  action->uuid, role2text(action->fail_role));
 
     value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
     if (value) {
         unpack_start_delay(value, action->meta);
     } else {
         long long start_delay = 0;
 
         value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
         if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
                                    &start_delay)) {
             g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
                                  crm_strdup_printf("%lld", start_delay));
         }
     }
 
 #if ENABLE_VERSIONED_ATTRS
     unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms,
                           data_set->now);
 #endif
 }
 
 static xmlNode *
 find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled)
 {
     guint interval_ms = 0;
     gboolean do_retry = TRUE;
     char *local_key = NULL;
     const char *name = NULL;
     const char *value = NULL;
     const char *interval_spec = NULL;
     char *match_key = NULL;
     xmlNode *op = NULL;
     xmlNode *operation = NULL;
 
   retry:
     for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
          operation = pcmk__xe_next(operation)) {
 
         if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
             name = crm_element_value(operation, "name");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             value = crm_element_value(operation, "enabled");
             if (!include_disabled && value && crm_is_true(value) == FALSE) {
                 continue;
             }
 
             interval_ms = crm_parse_interval_spec(interval_spec);
             match_key = pcmk__op_key(rsc->id, name, interval_ms);
             if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
                 op = operation;
             }
             free(match_key);
 
             if (rsc->clone_name) {
                 match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
                 if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
                     op = operation;
                 }
                 free(match_key);
             }
 
             if (op != NULL) {
                 free(local_key);
                 return op;
             }
         }
     }
 
     free(local_key);
     if (do_retry == FALSE) {
         return NULL;
     }
 
     do_retry = FALSE;
     if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
         local_key = pcmk__op_key(rsc->id, "migrate", 0);
         key = local_key;
         goto retry;
 
     } else if (strstr(key, "_notify_")) {
         local_key = pcmk__op_key(rsc->id, "notify", 0);
         key = local_key;
         goto retry;
     }
 
     return NULL;
 }
 
 xmlNode *
 find_rsc_op_entry(pe_resource_t * rsc, const char *key)
 {
     return find_rsc_op_entry_helper(rsc, key, FALSE);
 }
 
 void
 print_node(const char *pre_text, pe_node_t * node, gboolean details)
 {
     if (node == NULL) {
         crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
         return;
     }
 
     CRM_ASSERT(node->details);
     crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
               pre_text == NULL ? "" : pre_text,
               pre_text == NULL ? "" : ": ",
               node->details->online ? "" : "Unavailable/Unclean ",
               node->details->uname, node->weight, node->fixed ? "True" : "False");
 
     if (details) {
         int log_level = LOG_TRACE;
 
         char *pe_mutable = strdup("\t\t");
         GListPtr gIter = node->details->running_rsc;
 
         crm_trace("\t\t===Node Attributes");
         g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
         free(pe_mutable);
 
         crm_trace("\t\t=== Resources");
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
             rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending,
                             &log_level);
         }
     }
 }
 
 /*
  * Used by the HashTable for-loop
  */
 void
 print_str_str(gpointer key, gpointer value, gpointer user_data)
 {
     crm_trace("%s%s %s ==> %s",
               user_data == NULL ? "" : (char *)user_data,
               user_data == NULL ? "" : ": ", (char *)key, (char *)value);
 }
 
 void
 pe_free_action(pe_action_t * action)
 {
     if (action == NULL) {
         return;
     }
     g_list_free_full(action->actions_before, free);     /* pe_action_wrapper_t* */
     g_list_free_full(action->actions_after, free);      /* pe_action_wrapper_t* */
     if (action->extra) {
         g_hash_table_destroy(action->extra);
     }
     if (action->meta) {
         g_hash_table_destroy(action->meta);
     }
 #if ENABLE_VERSIONED_ATTRS
     if (action->rsc) {
         pe_free_rsc_action_details(action);
     }
 #endif
     free(action->cancel_task);
     free(action->reason);
     free(action->task);
     free(action->uuid);
     free(action->node);
     free(action);
 }
 
 GListPtr
 find_recurring_actions(GListPtr input, pe_node_t * not_on_node)
 {
     const char *value = NULL;
     GListPtr result = NULL;
     GListPtr gIter = input;
 
     CRM_CHECK(input != NULL, return NULL);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS);
         if (value == NULL) {
             /* skip */
         } else if (pcmk__str_eq(value, "0", pcmk__str_casei)) {
             /* skip */
         } else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) {
             /* skip */
         } else if (not_on_node == NULL) {
             crm_trace("(null) Found: %s", action->uuid);
             result = g_list_prepend(result, action);
 
         } else if (action->node == NULL) {
             /* skip */
         } else if (action->node->details != not_on_node->details) {
             crm_trace("Found: %s", action->uuid);
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 enum action_tasks
 get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic)
 {
     enum action_tasks task = text2task(name);
 
     if (rsc == NULL) {
         return task;
 
     } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
         switch (task) {
             case stopped_rsc:
             case started_rsc:
             case action_demoted:
             case action_promoted:
                 crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
                 return task - 1;
             default:
                 break;
         }
     }
     return task;
 }
 
 pe_action_t *
 find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node)
 {
     GListPtr gIter = NULL;
 
     CRM_CHECK(uuid || task, return NULL);
 
     for (gIter = input; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
             continue;
 
         } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
             continue;
 
         } else if (on_node == NULL) {
             return action;
 
         } else if (action->node == NULL) {
             continue;
 
         } else if (on_node->details == action->node->details) {
             return action;
         }
     }
 
     return NULL;
 }
 
 GListPtr
 find_actions(GListPtr input, const char *key, const pe_node_t *on_node)
 {
     GListPtr gIter = input;
     GListPtr result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
             crm_trace("%s does not match action %s", key, action->uuid);
             continue;
 
         } else if (on_node == NULL) {
             crm_trace("Action %s matches (ignoring node)", key);
             result = g_list_prepend(result, action);
 
         } else if (action->node == NULL) {
             crm_trace("Action %s matches (unallocated, assigning to %s)",
                       key, on_node->details->uname);
 
             action->node = pe__copy_node(on_node);
             result = g_list_prepend(result, action);
 
         } else if (on_node->details == action->node->details) {
             crm_trace("Action %s on %s matches", key, on_node->details->uname);
             result = g_list_prepend(result, action);
 
         } else {
             crm_trace("Action %s on node %s does not match requested node %s",
                       key, action->node->details->uname,
                       on_node->details->uname);
         }
     }
 
     return result;
 }
 
 GList *
 find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
 {
     GList *result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     if (on_node == NULL) {
         crm_trace("Not searching for action %s because node not specified",
                   key);
         return NULL;
     }
 
     for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (action->node == NULL) {
             crm_trace("Skipping comparison of %s vs action %s without node",
                       key, action->uuid);
 
         } else if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
             crm_trace("Desired action %s doesn't match %s", key, action->uuid);
 
         } else if (!pcmk__str_eq(on_node->details->id, action->node->details->id, pcmk__str_casei)) {
             crm_trace("Action %s desired node ID %s doesn't match %s",
                       key, on_node->details->id, action->node->details->id);
 
         } else {
             crm_trace("Action %s matches", key);
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 /*!
  * \brief Find all actions of given type for a resource
  *
  * \param[in] rsc           Resource to search
  * \param[in] node          Find only actions scheduled on this node
  * \param[in] task          Action name to search for
  * \param[in] require_node  If TRUE, NULL node or action node will not match
  *
  * \return List of actions found (or NULL if none)
  * \note If node is not NULL and require_node is FALSE, matching actions
  *       without a node will be assigned to node.
  */
 GList *
 pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                      const char *task, bool require_node)
 {
     GList *result = NULL;
     char *key = pcmk__op_key(rsc->id, task, 0);
 
     if (require_node) {
         result = find_actions_exact(rsc->actions, key, node);
     } else {
         result = find_actions(rsc->actions, key, node);
     }
     free(key);
     return result;
 }
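 
 /* A minimal usage sketch (hypothetical caller; the resource, node, and task
  * are assumptions for illustration): find this resource's stop actions that
  * are scheduled on a particular node, requiring an exact node match:
  *
  *     GList *stops = pe__resource_actions(rsc, node, CRMD_ACTION_STOP, true);
  *
  *     ... inspect the matching pe_action_t entries ...
  *     g_list_free(stops);   // the actions themselves are not copied
  */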
 
 static void
 resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
 {
     pe_node_t *match = NULL;
 
     if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
         && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
         /* This string comparison may be fragile, but exclusive resources and
          * exclusive nodes should not have the symmetric_default constraint
          * applied to them.
          */
         return;
 
     } else if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             resource_node_score(child_rsc, node, score, tag);
         }
     }
 
     pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
     match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match == NULL) {
         match = pe__copy_node(node);
         g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
     }
     match->weight = pe__add_scores(match->weight, score);
 }
 
 void
 resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
                   pe_working_set_t * data_set)
 {
     if (node != NULL) {
         resource_node_score(rsc, node, score, tag);
 
     } else if (data_set != NULL) {
         GListPtr gIter = data_set->nodes;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node_iter = (pe_node_t *) gIter->data;
 
             resource_node_score(rsc, node_iter, score, tag);
         }
 
     } else {
         GHashTableIter iter;
         pe_node_t *node_iter = NULL;
 
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
             resource_node_score(rsc, node_iter, score, tag);
         }
     }
 
     if (node == NULL && score == -INFINITY) {
         if (rsc->allocated_to) {
             crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
             free(rsc->allocated_to);
             rsc->allocated_to = NULL;
         }
     }
 }
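 
 /* A minimal sketch (hypothetical caller): ban a resource from a single node
  * by applying a -INFINITY score there, or clear its allocation everywhere by
  * passing a NULL node with a -INFINITY score:
  *
  *     resource_location(rsc, node, -INFINITY, "example-ban", data_set);
  */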
 
 #define sort_return(an_int, why) do {					\
 	free(a_uuid);						\
 	free(b_uuid);						\
 	crm_trace("%s (%d) %c %s (%d) : %s",				\
 		  a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',	\
 		  b_xml_id, b_call_id, why);				\
 	return an_int;							\
     } while(0)
 
 gint
 sort_op_by_callid(gconstpointer a, gconstpointer b)
 {
     int a_call_id = -1;
     int b_call_id = -1;
 
     char *a_uuid = NULL;
     char *b_uuid = NULL;
 
     const xmlNode *xml_a = a;
     const xmlNode *xml_b = b;
 
     const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
     const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
 
     if (pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_casei)) {
         /* Duplicate lrm_rsc_op entries in the status section are unlikely
          * to be a good thing. We can handle them easily enough, but we
          * should get to the bottom of why they are happening.
          */
         pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
         sort_return(0, "duplicate");
     }
 
     crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
     crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
 
     if (a_call_id == -1 && b_call_id == -1) {
         /* Both are pending ops, so their relative order doesn't matter
          * (stops are never pending)
          */
         sort_return(0, "pending");
 
     } else if (a_call_id >= 0 && a_call_id < b_call_id) {
         sort_return(-1, "call id");
 
     } else if (b_call_id >= 0 && a_call_id > b_call_id) {
         sort_return(1, "call id");
 
     } else if (b_call_id >= 0 && a_call_id == b_call_id) {
         /*
          * The op and last_failed_op are the same
          * Order on last-rc-change
          */
         time_t last_a = -1;
         time_t last_b = -1;
 
         crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
         crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
 
         crm_trace("rc-change: %lld vs %lld",
                   (long long) last_a, (long long) last_b);
         if (last_a >= 0 && last_a < last_b) {
             sort_return(-1, "rc-change");
 
         } else if (last_b >= 0 && last_a > last_b) {
             sort_return(1, "rc-change");
         }
         sort_return(0, "rc-change");
 
     } else {
         /* One of the inputs is a pending operation
          * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
          */
 
         int a_id = -1;
         int b_id = -1;
 
         const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
         const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
 
         CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
         if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic a");
         }
         if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic b");
         }
         /* try to determine the relative age of the operation...
          * some pending operations (e.g. a start) may have been superseded
          *   by a subsequent stop
          *
          * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
          */
         if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
             /* Some of the logic in here may be redundant...
              *
              * If the UUID from the TE doesn't match, then one of the
              * operations had better be pending. Pending operations don't
              * survive between elections and joins because we query the
              * LRM directly.
              */
 
             if (b_call_id == -1) {
                 sort_return(-1, "transition + call");
 
             } else if (a_call_id == -1) {
                 sort_return(1, "transition + call");
             }
 
         } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
             sort_return(-1, "transition");
 
         } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
             sort_return(1, "transition");
         }
     }
 
     /* we should never end up here */
     CRM_CHECK(FALSE, sort_return(0, "default"));
 
 }
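 
 /* Typical use (mirroring get_operation_list() in crm_mon_print.c): sort a
  * list of lrm_rsc_op XML entries into execution order before displaying or
  * replaying a resource's history:
  *
  *     op_list = g_list_sort(op_list, sort_op_by_callid);
  */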
 
 time_t
 get_effective_time(pe_working_set_t * data_set)
 {
     if(data_set) {
         if (data_set->now == NULL) {
             crm_trace("Recording a new 'now'");
             data_set->now = crm_time_new(NULL);
         }
         return crm_time_get_seconds_since_epoch(data_set->now);
     }
 
     crm_trace("Defaulting to 'now'");
     return time(NULL);
 }
 
 gboolean
 get_target_role(pe_resource_t * rsc, enum rsc_role_e * role)
 {
     enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
     const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
 
     CRM_CHECK(role != NULL, return FALSE);
 
     if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei)
         || pcmk__str_eq("default", value, pcmk__str_casei)) {
         return FALSE;
     }
 
     local_role = text2role(value);
     if (local_role == RSC_ROLE_UNKNOWN) {
         pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
                          "because '%s' is not valid", rsc->id, value);
         return FALSE;
 
     } else if (local_role > RSC_ROLE_STARTED) {
         if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
             if (local_role > RSC_ROLE_SLAVE) {
                 /* This is what we'd do anyway; just leave the default to
                  * avoid messing up the placement algorithm
                  */
                 return FALSE;
             }
 
         } else {
             pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
                              "because '%s' only makes sense for promotable "
                              "clones", rsc->id, value);
             return FALSE;
         }
     }
 
     *role = local_role;
     return TRUE;
 }
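 
 /* Usage sketch (hypothetical caller): read a resource's configured
  * target-role, falling back to other logic when it is unset or "default":
  *
  *     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
  *
  *     if (get_target_role(rsc, &role)) {
  *         // role holds the configured target-role
  *     }
  */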
 
 gboolean
 order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
 {
     GListPtr gIter = NULL;
     pe_action_wrapper_t *wrapper = NULL;
     GListPtr list = NULL;
 
     if (order == pe_order_none) {
         return FALSE;
     }
 
     if (lh_action == NULL || rh_action == NULL) {
         return FALSE;
     }
 
     crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
 
     /* Ensure we never create a dependency on ourselves... it's happened */
     CRM_ASSERT(lh_action != rh_action);
 
     /* Filter dups, otherwise update_action_states() has too much work to do */
     gIter = lh_action->actions_after;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
 
         if (after->action == rh_action && (after->type & order)) {
             return FALSE;
         }
     }
 
     wrapper = calloc(1, sizeof(pe_action_wrapper_t));
     wrapper->action = rh_action;
     wrapper->type = order;
     list = lh_action->actions_after;
     list = g_list_prepend(list, wrapper);
     lh_action->actions_after = list;
 
     wrapper = calloc(1, sizeof(pe_action_wrapper_t));
     wrapper->action = lh_action;
     wrapper->type = order;
     list = rh_action->actions_before;
     list = g_list_prepend(list, wrapper);
     rh_action->actions_before = list;
     return TRUE;
 }
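 
 /* A minimal sketch: make one action a prerequisite of another (both
  * pe_action_t pointers are assumed to exist). Duplicate orderings are
  * filtered out, so this is safe to call repeatedly:
  *
  *     order_actions(unfence, dependency, pe_order_optional);
  *
  * This mirrors the call in trigger_unfencing() below.
  */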
 
 pe_action_t *
 get_pseudo_op(const char *name, pe_working_set_t * data_set)
 {
     pe_action_t *op = NULL;
 
     if(data_set->singletons) {
         op = g_hash_table_lookup(data_set->singletons, name);
     }
     if (op == NULL) {
         op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
     }
 
     return op;
 }
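 
 /* Usage sketch (the pseudo-op name is illustrative only): look up or create
  * a shared pseudo-action that other actions can be ordered against:
  *
  *     pe_action_t *barrier = get_pseudo_op("example-barrier", data_set);
  */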
 
 void
 destroy_ticket(gpointer data)
 {
     pe_ticket_t *ticket = data;
 
     if (ticket->state) {
         g_hash_table_destroy(ticket->state);
     }
     free(ticket->id);
     free(ticket);
 }
 
 pe_ticket_t *
 ticket_new(const char *ticket_id, pe_working_set_t * data_set)
 {
     pe_ticket_t *ticket = NULL;
 
     if (pcmk__str_empty(ticket_id)) {
         return NULL;
     }
 
     if (data_set->tickets == NULL) {
         data_set->tickets =
             g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                   destroy_ticket);
     }
 
     ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
     if (ticket == NULL) {
 
         ticket = calloc(1, sizeof(pe_ticket_t));
         if (ticket == NULL) {
             crm_err("Cannot allocate ticket '%s'", ticket_id);
             return NULL;
         }
 
         crm_trace("Creaing ticket entry for %s", ticket_id);
 
         ticket->id = strdup(ticket_id);
         ticket->granted = FALSE;
         ticket->last_granted = -1;
         ticket->standby = FALSE;
         ticket->state = crm_str_table_new();
 
         g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
     }
 
     return ticket;
 }
 
 const char *rsc_printable_id(pe_resource_t *rsc)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         return ID(rsc->xml);
     }
     return rsc->id;
 }
 
 void
 pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
 {
     pe__clear_resource_flags(rsc, flags);
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
     }
 }
 
 void
 pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
 {
     for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *r = (pe_resource_t *) lpc->data;
         pe__clear_resource_flags_recursive(r, flag);
     }
 }
 
 void
 pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
 {
     pe__set_resource_flags(rsc, flags);
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
     }
 }
 
 static GListPtr
 find_unfencing_devices(GListPtr candidates, GListPtr matches)
 {
     for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *candidate = gIter->data;
         const char *provides = g_hash_table_lookup(candidate->meta,
                                                    PCMK_STONITH_PROVIDES);
         const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
 
         if(candidate->children) {
             matches = find_unfencing_devices(candidate->children, matches);
         } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
             continue;
 
         } else if (pcmk__str_eq(provides, "unfencing", pcmk__str_casei) || pcmk__str_eq(requires, "unfencing", pcmk__str_casei)) {
             matches = g_list_prepend(matches, candidate);
         }
     }
     return matches;
 }
 
 static int
 node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set)
 {
     int member_count = 0;
     int online_count = 0;
     int top_priority = 0;
     int lowest_priority = 0;
     GListPtr gIter = NULL;
 
     // `priority-fencing-delay` is disabled
     if (data_set->priority_fencing_delay <= 0) {
         return 0;
     }
 
     /* No need to request a delay if the fencing target is not a normal cluster
      * member, for example if it's a remote node or a guest node. */
     if (node->details->type != node_member) {
         return 0;
     }
 
     // No need to request a delay if the fencing target is in our partition
     if (node->details->online) {
         return 0;
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *n =  gIter->data;
 
         if (n->details->type != node_member) {
             continue;
         }
 
         member_count ++;
 
         if (n->details->online) {
             online_count++;
         }
 
         if (member_count == 1
             || n->details->priority > top_priority) {
             top_priority = n->details->priority;
         }
 
         if (member_count == 1
             || n->details->priority < lowest_priority) {
             lowest_priority = n->details->priority;
         }
     }
 
     // No need to delay if we have more than half of the cluster members
     if (online_count > member_count / 2) {
         return 0;
     }
 
     /* If all the nodes have equal priority, no extra delay is needed;
      * any configured `pcmk_delay_base/max` will still apply. */
     if (lowest_priority == top_priority) {
         return 0;
     }
 
     if (node->details->priority < top_priority) {
         return 0;
     }
 
     return data_set->priority_fencing_delay;
 }
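 
 /* Worked example (hypothetical two-node cluster): with
  * priority-fencing-delay=15s and node priorities 1 and 2, a split brain
  * makes each node try to fence the other; fencing the priority-2 node is
  * delayed by 15s while fencing the priority-1 node is not, so the
  * higher-priority node wins the fencing race.
  */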
 
 pe_action_t *
 pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason,
             bool priority_delay, pe_working_set_t * data_set)
 {
     char *op_key = NULL;
     pe_action_t *stonith_op = NULL;
 
     if(op == NULL) {
         op = data_set->stonith_action;
     }
 
     op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
 
     if(data_set->singletons) {
         stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
     }
 
     if(stonith_op == NULL) {
         stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
 
         add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
         add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
         add_hash_param(stonith_op->meta, "stonith_action", op);
 
         if (pe__is_guest_or_remote_node(node)
             && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
             /* Extra work to detect device changes on remotes
              *
              * We may do this for all nodes in the future, but for now
              * the check_action_definition() based stuff works fine.
              */
             long max = 1024;
             long digests_all_offset = 0;
             long digests_secure_offset = 0;
 
             char *digests_all = calloc(max, sizeof(char));
             char *digests_secure = calloc(max, sizeof(char));
             GListPtr matches = find_unfencing_devices(data_set->resources, NULL);
 
             for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
                 pe_resource_t *match = gIter->data;
                 const char *agent = g_hash_table_lookup(match->meta,
                                                         XML_ATTR_TYPE);
                 op_digest_cache_t *data = NULL;
 
                 data = pe__compare_fencing_digest(match, agent, node, data_set);
                 if(data->rc == RSC_DIGEST_ALL) {
                     optional = FALSE;
                     crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
                     if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
                         fprintf(stdout, "  notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id);
                     }
                 }
 
                 digests_all_offset += snprintf(
                     digests_all+digests_all_offset, max-digests_all_offset,
                     "%s:%s:%s,", match->id, agent, data->digest_all_calc);
 
                 digests_secure_offset += snprintf(
                     digests_secure+digests_secure_offset, max-digests_secure_offset,
                     "%s:%s:%s,", match->id, agent, data->digest_secure_calc);
             }
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_OP_ATTR_DIGESTS_ALL),
                                 digests_all);
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_OP_ATTR_DIGESTS_SECURE),
                                 digests_secure);
         }
 
     } else {
         free(op_key);
     }
 
     if (data_set->priority_fencing_delay > 0
 
             /* This is a case where `priority-fencing-delay` applies; at
              * least add the `priority-fencing-delay` field as an indicator. */
         && (priority_delay
 
             /* Recalculate the priority delay when pe_fence_op() is called
              * again by stage6(), after node priority has actually been
              * calculated with native_add_running() */
             || g_hash_table_lookup(stonith_op->meta,
                                    XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
 
             /* Add `priority-fencing-delay` to the fencing op even if it's 0
              * for the target node, so that it takes precedence over any
              * possible `pcmk_delay_base/max`.
              */
             char *delay_s = crm_itoa(node_priority_fencing_delay(node, data_set));
 
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
                                 delay_s);
     }
 
     if(optional == FALSE && pe_can_fence(data_set, node)) {
         pe_action_required(stonith_op, NULL, reason);
     } else if(reason && stonith_op->reason == NULL) {
         stonith_op->reason = strdup(reason);
     }
 
     return stonith_op;
 }
 
 void
 trigger_unfencing(
     pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
         /* No resources require it */
         return;
 
     } else if ((rsc != NULL)
                && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
         /* Wasn't a stonith device */
         return;
 
     } else if(node
               && node->details->online
               && node->details->unclean == FALSE
               && node->details->shutdown == FALSE) {
         pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
 
         if(dependency) {
             order_actions(unfence, dependency, pe_order_optional);
         }
 
     } else if(rsc) {
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
                 trigger_unfencing(rsc, node, reason, dependency, data_set);
             }
         }
     }
 }
 
 gboolean
 add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref)
 {
     pe_tag_t *tag = NULL;
     GListPtr gIter = NULL;
     gboolean is_existing = FALSE;
 
     CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
 
     tag = g_hash_table_lookup(tags, tag_name);
     if (tag == NULL) {
         tag = calloc(1, sizeof(pe_tag_t));
         if (tag == NULL) {
             return FALSE;
         }
         tag->id = strdup(tag_name);
         tag->refs = NULL;
         g_hash_table_insert(tags, strdup(tag_name), tag);
     }
 
     for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
         const char *existing_ref = (const char *) gIter->data;
 
         if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
             is_existing = TRUE;
             break;
         }
     }
 
     if (is_existing == FALSE) {
         tag->refs = g_list_append(tag->refs, strdup(obj_ref));
         crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
     }
 
     return TRUE;
 }
 
 void pe_action_set_flag_reason(const char *function, long line,
                                pe_action_t *action, pe_action_t *reason, const char *text,
                                enum pe_action_flags flags, bool overwrite)
 {
     bool unset = FALSE;
     bool update = FALSE;
     const char *change = NULL;
 
     if (pcmk_is_set(flags, pe_action_runnable)) {
         unset = TRUE;
         change = "unrunnable";
     } else if (pcmk_is_set(flags, pe_action_optional)) {
         unset = TRUE;
         change = "required";
     } else if (pcmk_is_set(flags, pe_action_migrate_runnable)) {
         unset = TRUE;
         overwrite = TRUE;
         change = "unrunnable";
     } else if (pcmk_is_set(flags, pe_action_dangle)) {
         change = "dangling";
     } else if (pcmk_is_set(flags, pe_action_requires_any)) {
         change = "required";
     } else {
         crm_err("Unknown flag change to %x by %s: 0x%s",
                 flags, action->uuid, (reason? reason->uuid : "0"));
     }
 
     if(unset) {
         if (pcmk_is_set(action->flags, flags)) {
             pe__clear_action_flags_as(function, line, action, flags);
             update = TRUE;
         }
 
     } else {
         if (!pcmk_is_set(action->flags, flags)) {
             pe__set_action_flags_as(function, line, action, flags);
             update = TRUE;
         }
     }
 
     if((change && update) || text) {
         char *reason_text = NULL;
         if(reason == NULL) {
             pe_action_set_reason(action, text, overwrite);
 
         } else if(reason->rsc == NULL) {
             reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:"");
         } else {
             reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA");
         }
 
         if(reason_text && action->rsc != reason->rsc) {
             pe_action_set_reason(action, reason_text, overwrite);
         }
         free(reason_text);
     }
 }
 
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
 {
     if (action->reason != NULL && overwrite) {
         pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
                      action->uuid, action->reason, crm_str(reason));
         free(action->reason);
     } else if (action->reason == NULL) {
         pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
                      action->uuid, crm_str(reason));
     } else {
         // crm_assert(action->reason != NULL && !overwrite);
         return;
     }
 
     if (reason != NULL) {
         action->reason = strdup(reason);
     } else {
         action->reason = NULL;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether shutdown has been requested for a node
  *
  * \param[in] node  Node to check
  *
  * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
  * \note This differs from simply using node->details->shutdown in that it can
  *       be used before that has been determined (and in fact to determine it),
  *       and it can also be used to distinguish requested shutdown from implicit
  *       shutdown of remote nodes by virtue of their connection stopping.
  */
 bool
 pe__shutdown_requested(pe_node_t *node)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
 
     return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
 }
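 
 /* Semantics sketch: a shutdown attribute that is unset or "0" means no
  * shutdown was requested; any other value means one was:
  *
  *     if (pe__shutdown_requested(node)) {
  *         // schedule a clean shutdown instead of treating the node as lost
  *     }
  */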
 
 /*!
  * \internal
  * \brief Update a data set's "recheck by" time
  *
  * \param[in]     recheck   Epoch time when recheck should happen
  * \param[in,out] data_set  Current working set
  */
 void
 pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
 {
     if ((recheck > get_effective_time(data_set))
         && ((data_set->recheck_by == 0)
             || (data_set->recheck_by > recheck))) {
         data_set->recheck_by = recheck;
     }
 }
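 
 /* Usage sketch: after evaluating a date-based rule whose result will change
  * at a known future time, record that time so the cluster can re-run the
  * scheduler then (pe__unpack_dataset_nvpairs() below is a real caller):
  *
  *     pe__update_recheck_time(recheck, data_set);
  */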
 
 /*!
  * \internal
  * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set
  */
 void
 pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
                            pe_rule_eval_data_t *rule_data, GHashTable *hash,
                            const char *always_first, gboolean overwrite,
                            pe_working_set_t *data_set)
 {
     crm_time_t *next_change = crm_time_new_undefined();
 
     pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
                     always_first, overwrite, next_change);
     if (crm_time_is_defined(next_change)) {
         time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
         pe__update_recheck_time(recheck, data_set);
     }
     crm_time_free(next_change);
 }
 
 bool
 pe__resource_is_disabled(pe_resource_t *rsc)
 {
     const char *target_role = NULL;
 
     CRM_CHECK(rsc != NULL, return false);
     target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         if ((target_role_e == RSC_ROLE_STOPPED)
             || ((target_role_e == RSC_ROLE_SLAVE)
                 && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
             return true;
         }
     }
     return false;
 }
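 
 /* Usage sketch (hypothetical caller): skip resources an administrator has
  * deliberately stopped, or demoted in a promotable clone, via target-role:
  *
  *     if (pe__resource_is_disabled(rsc)) {
  *         continue;
  *     }
  */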
 
 /*!
  * \internal
  * \brief Create an action to clear a resource's history from CIB
  *
  * \param[in] rsc   Resource to clear
  * \param[in] node  Node to clear history on
  *
  * \return New action to clear resource history
  */
 pe_action_t *
 pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                            pe_working_set_t *data_set)
 {
     char *key = NULL;
 
     CRM_ASSERT(rsc && node);
     key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
     return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
                          data_set);
 }
 
 bool
 pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list)
 {
     for (GListPtr ele = rsc->running_on; ele; ele = ele->next) {
         pe_node_t *node = (pe_node_t *) ele->data;
         if (pcmk__str_in_list(node_list, node->details->uname)) {
             return true;
         }
     }
 
     return false;
 }
 
 bool
 pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node)
 {
     return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any_node_in_list(rsc, only_node));
 }
 
 GListPtr
 pe__filter_rsc_list(GListPtr rscs, GListPtr filter)
 {
     GListPtr retval = NULL;
 
     for (GListPtr gIter = rscs; gIter; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         /* I think the second condition is safe here for all callers of this
          * function.  If not, it needs to move into pe__node_text.
          */
         if (pcmk__str_in_list(filter, rsc_printable_id(rsc)) ||
             (rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent)))) {
             retval = g_list_prepend(retval, rsc);
         }
     }
 
     return retval;
 }
diff --git a/tools/crm_mon.h b/tools/crm_mon.h
index 143e8d8e8c..f746507f3d 100644
--- a/tools/crm_mon.h
+++ b/tools/crm_mon.h
@@ -1,127 +1,126 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <glib.h>
 
 #include <crm/common/output_internal.h>
 #include <crm/common/curses_internal.h>
 #include <crm/pengine/pe_types.h>
 #include <crm/stonith-ng.h>
 
 /* Never display node attributes whose name starts with one of these prefixes */
 #define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX,   \
                      "shutdown", "terminate", "standby", "probe_complete", \
                      "#", NULL }
 
 #if CURSES_ENABLED
 #  define print_dot(output_format) if (output_format == mon_output_console) { \
 	printw(".");				\
 	clrtoeol();				\
 	refresh();				\
     } else {					\
 	fprintf(stdout, ".");			\
     }
 #else
 #  define print_dot(output_format) fprintf(stdout, ".");
 #endif
 
 #if CURSES_ENABLED
 #  define print_as(output_format, fmt, args...) if (output_format == mon_output_console) { \
 	printw(fmt, ##args);				\
 	clrtoeol();					\
 	refresh();					\
     } else {						\
 	fprintf(stdout, fmt, ##args);			\
     }
 #else
 #  define print_as(output_format, fmt, args...) fprintf(stdout, fmt, ##args);
 #endif
 
 typedef enum mon_output_format_e {
     mon_output_unset,
     mon_output_none,
     mon_output_monitor,
     mon_output_plain,
     mon_output_console,
     mon_output_xml,
     mon_output_legacy_xml,
     mon_output_html,
     mon_output_cgi
 } mon_output_format_t;
 
 #define mon_show_stack          (1 << 0)
 #define mon_show_dc             (1 << 1)
 #define mon_show_times          (1 << 2)
 #define mon_show_counts         (1 << 3)
 #define mon_show_options        (1 << 4)
 #define mon_show_nodes          (1 << 5)
 #define mon_show_resources      (1 << 6)
 #define mon_show_attributes     (1 << 7)
 #define mon_show_failcounts     (1 << 8)
 #define mon_show_operations     (1 << 9)
 #define mon_show_fence_failed   (1 << 10)
 #define mon_show_fence_pending  (1 << 11)
 #define mon_show_fence_worked   (1 << 12)
 #define mon_show_tickets        (1 << 13)
 #define mon_show_bans           (1 << 14)
 #define mon_show_failures       (1 << 15)
 
 #define mon_show_fencing_all    (mon_show_fence_failed | mon_show_fence_pending | mon_show_fence_worked)
 #define mon_show_summary        (mon_show_stack | mon_show_dc | mon_show_times | mon_show_counts)
 #define mon_show_all            (mon_show_summary | mon_show_nodes | mon_show_resources | \
                                  mon_show_attributes | mon_show_failcounts | mon_show_operations | \
                                  mon_show_fencing_all | mon_show_tickets | mon_show_bans | \
                                  mon_show_failures | mon_show_options)
 
 #define mon_op_group_by_node        (0x0001U)
 #define mon_op_inactive_resources   (0x0002U)
 #define mon_op_one_shot             (0x0004U)
 #define mon_op_has_warnings         (0x0008U)
 #define mon_op_print_timing         (0x0010U)
 #define mon_op_watch_fencing        (0x0020U)
 #define mon_op_fence_history        (0x0040U)
 #define mon_op_fence_full_history   (0x0080U)
 #define mon_op_fence_connect        (0x0100U)
 #define mon_op_print_brief          (0x0200U)
 #define mon_op_print_pending        (0x0400U)
 #define mon_op_print_clone_detail   (0x0800U)
 
 #define mon_op_default              (mon_op_print_pending | mon_op_fence_history | mon_op_fence_connect)
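 
 /* Example (hypothetical caller): combine the defaults with extra toggles
  * before passing the bitmasks to the print functions below:
  *
  *     unsigned int mon_ops = mon_op_default | mon_op_print_timing;
  *     unsigned int show = mon_show_summary | mon_show_nodes;
  */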
 
 void print_status(pcmk__output_t *out, pe_working_set_t *data_set,
                   stonith_history_t *stonith_history, unsigned int mon_ops,
                   unsigned int show, char *prefix, char *only_node,
                   char *only_rsc);
 void print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
                       crm_exit_t history_rc, stonith_history_t *stonith_history,
                       unsigned int mon_ops, unsigned int show, char *prefix,
                       char *only_node, char *only_rsc);
 int print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
                       stonith_history_t *stonith_history, unsigned int mon_ops,
                       unsigned int show, char *prefix, char *only_node,
                       char *only_rsc);
 
 GList *append_attr_list(GList *attr_list, char *name);
 void blank_screen(void);
-void crm_mon_get_parameters(pe_resource_t *rsc, pe_working_set_t *data_set);
 unsigned int get_resource_display_options(unsigned int mon_ops);
 
 void crm_mon_register_messages(pcmk__output_t *out);
 
 pcmk__output_t *crm_mon_mk_curses_output(char **argv);
 void curses_indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
 void curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
 
 #if CURSES_ENABLED
 extern GOptionEntry crm_mon_curses_output_entries[];
 #define CRM_MON_SUPPORTED_FORMAT_CURSES { "console", crm_mon_mk_curses_output, crm_mon_curses_output_entries }
 #endif
 
 pcmk__output_t *crm_mon_mk_xml_output(char **argv);
 #define CRM_MON_SUPPORTED_FORMAT_XML { "xml", crm_mon_mk_xml_output, pcmk__xml_output_entries }
diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c
index cc3efb022a..8ae11bfdf9 100644
--- a/tools/crm_mon_print.c
+++ b/tools/crm_mon_print.c
@@ -1,997 +1,997 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <glib.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
 
 #ifndef PCMK__CONFIG_H
 #  define PCMK__CONFIG_H
 #  include <config.h>
 #endif
 
 #include <crm/cib/util.h>
 #include <crm/common/curses_internal.h>
 #include <crm/common/iso8601_internal.h>
 #include <crm/common/xml.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 #include <crm/pengine/pe_types.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/util.h>
 #include <crm/fencing/internal.h>
 
 #include "crm_mon.h"
 
 static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set,
                              pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops,
                              GListPtr op_list);
 static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                               pe_node_t *node, xmlNode *node_state, gboolean operations,
                               unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc);
-static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list,
+static gboolean add_extra_info(pcmk__output_t *out, pe_node_t *node,
+                               GListPtr rsc_list, pe_working_set_t *data_set,
                                const char *attrname, int *expected_score);
 static void print_node_attribute(gpointer name, gpointer user_data);
 static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                               gboolean operations, unsigned int mon_ops,
                               GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
 static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                                  gboolean print_spacer);
 static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                                unsigned int mon_ops, const char *prefix,
                                GListPtr only_rsc, gboolean print_spacer);
 static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                                  unsigned int mon_ops, GListPtr only_node,
                                  GListPtr only_rsc, gboolean print_spacer);
 static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                                 GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
 
 static GListPtr
 build_uname_list(pe_working_set_t *data_set, const char *s) {
     GListPtr unames = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         /* Either nothing was given, so return a list standing for all node
          * names, or '*' was given. The latter would normally fall into the
          * pe__unames_with_tag branch, where it would return an empty list;
          * catch it here instead.
          */
         unames = g_list_prepend(unames, strdup("*"));
     } else {
         pe_node_t *node = pe_find_node(data_set->nodes, s);
 
         if (node) {
             /* The given string was a valid uname for a node.  Return a
              * singleton list containing just that uname.
              */
             unames = g_list_prepend(unames, strdup(s));
         } else {
             /* The given string was not a valid uname.  It's either a tag or
              * it's a typo or something.  In the first case, we'll return a
              * list of all the unames of the nodes with the given tag.  In the
              * second case, we'll return a NULL pointer and nothing will
              * get displayed.
              */
             unames = pe__unames_with_tag(data_set, s);
         }
     }
 
     return unames;
 }
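 
 /* Example: build_uname_list(data_set, NULL) and build_uname_list(data_set,
  * "*") both yield the single-entry list ["*"], which the filtering below
  * treats as matching every node.
  */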
 
 static GListPtr
 build_rsc_list(pe_working_set_t *data_set, const char *s) {
     GListPtr resources = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         resources = g_list_prepend(resources, strdup("*"));
     } else {
         pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
                                                          pe_find_renamed|pe_find_any);
 
         if (rsc) {
             /* A colon in the name we were given means we're being asked to filter
              * on a specific instance of a cloned resource.  Put that exact string
              * into the filter list.  Otherwise, use the printable ID of whatever
              * resource was found that matches what was asked for.
              */
             if (strstr(s, ":") != NULL) {
                 resources = g_list_prepend(resources, strdup(rsc->id));
             } else {
                 resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
             }
         } else {
             /* The given string was not a valid resource name.  It's either
              * a tag or it's a typo or something.  See build_uname_list for
              * more detail.
              */
             resources = pe__rscs_with_tag(data_set, s);
         }
     }
 
     return resources;
 }
 
 static int
 failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc, time_t *last_failure) {
     return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default,
                                   NULL, data_set)
                : 0;
 }
 
 static GListPtr
 get_operation_list(xmlNode *rsc_entry) {
     GListPtr op_list = NULL;
     xmlNode *rsc_op = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
         const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(rsc_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
         int op_rc_i = crm_parse_int(op_rc, "0");
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* Ignore notifies and some probes */
         if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
             continue;
         }
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
             op_list = g_list_append(op_list, rsc_op);
         }
     }
 
     op_list = g_list_sort(op_list, sort_op_by_callid);
     return op_list;
 }
 
 /*!
  * \internal
  * \brief Print resource operation/failure history
  *
  * \param[in] out       The output functions structure.
  * \param[in] data_set  Cluster state to display.
  * \param[in] node      Node that ran this resource.
  * \param[in] rsc_entry Root of XML tree describing resource status.
  * \param[in] mon_ops   Bitmask of mon_op_*.
  * \param[in] op_list   A list of operations to print.
  */
 static int
 print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node,
                   xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     /* Print each operation */
     for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *xml_op = (xmlNode *) gIter->data;
         const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(xml_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
         int op_rc_i = crm_parse_int(op_rc, "0");
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* If this is the first printed operation, print heading for resource */
         if (rc == pcmk_rc_no_output) {
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE);
             rc = pcmk_rc_ok;
         }
 
         /* Print the operation */
         out->message(out, "op-history", xml_op, task, interval_ms_s,
                      op_rc_i, pcmk_is_set(mon_ops, mon_op_print_timing));
     }
 
     /* Free the list we created (no need to free the individual items) */
     g_list_free(op_list);
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node operation/failure history
  *
  * \param[in] out        The output functions structure.
  * \param[in] data_set   Cluster state to display.
  * \param[in] node_state Root of XML tree describing node status.
  * \param[in] operations Whether to print operations or just failcounts.
  * \param[in] mon_ops    Bitmask of mon_op_*.
  */
 static int
 print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                    pe_node_t *node, xmlNode *node_state, gboolean operations,
                    unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc)
 {
     xmlNode *lrm_rsc = NULL;
     xmlNode *rsc_entry = NULL;
     int rc = pcmk_rc_no_output;
 
     lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
     lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
 
     /* Print history of each of the node's resources */
     for (rsc_entry = pcmk__xe_first_child(lrm_rsc); rsc_entry != NULL;
          rsc_entry = pcmk__xe_next(rsc_entry)) {
 
         const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
         pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
         if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
             continue;
         }
 
         /* We can't use is_filtered here to filter group resources.  For is_filtered,
          * we have to decide whether to check the parent or not.  If we check the
          * parent, all elements of a group will always be printed because that's how
          * is_filtered works for groups.  If we do not check the parent, sometimes
          * this will filter everything out.
          *
          * For other resource types, is_filtered is okay.
          */
         if (uber_parent(rsc)->variant == pe_group) {
             if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) &&
                 !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
                 continue;
             }
         } else {
             if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
                 continue;
             }
         }
 
         if (operations == FALSE) {
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             if (failcount <= 0) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, get_resource_display_options(mon_ops),
                              FALSE, NULL,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                              pcmk_is_set(mon_ops, mon_op_print_brief),
                              pcmk_is_set(mon_ops, mon_op_group_by_node),
                              only_node, only_rsc);
             }
 
             out->message(out, "resource-history", rsc, rsc_id, FALSE,
                          failcount, last_failure, FALSE);
         } else {
             GListPtr op_list = get_operation_list(rsc_entry);
 
             if (op_list == NULL) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, get_resource_display_options(mon_ops),
                              FALSE, NULL,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                              pcmk_is_set(mon_ops, mon_op_print_brief),
                              pcmk_is_set(mon_ops, mon_op_group_by_node),
                              only_node, only_rsc);
             }
 
             print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Determine whether extended information about an attribute should be added.
  *
  * \param[in]  out            The output functions structure.
  * \param[in]  node           Node that ran this resource.
  * \param[in]  rsc_list       The list of resources for this node.
  * \param[in]  attrname       The attribute to find.
  * \param[out] expected_score The expected value for this attribute.
  *
  * \return TRUE if extended information should be printed, FALSE otherwise
  * \note Currently, extended information is only supported for ping/pingd
  *       resources, for which a message will be printed if connectivity is lost
  *       or degraded.
  */
 static gboolean
 add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list,
-               const char *attrname, int *expected_score)
+               pe_working_set_t *data_set, const char *attrname,
+               int *expected_score)
 {
     GListPtr gIter = NULL;
 
     for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         const char *type = g_hash_table_lookup(rsc->meta, "type");
         const char *name = NULL;
+        GHashTable *params = NULL;
 
         if (rsc->children != NULL) {
-            if (add_extra_info(out, node, rsc->children, attrname, expected_score)) {
+            if (add_extra_info(out, node, rsc->children, data_set, attrname,
+                               expected_score)) {
                 return TRUE;
             }
         }
 
         if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
             continue;
         }
 
-        name = g_hash_table_lookup(rsc->parameters, "name");
+        params = pe_rsc_params(rsc, node, data_set);
+        name = g_hash_table_lookup(params, "name");
 
         if (name == NULL) {
             name = "pingd";
         }
 
         /* To identify the resource with the attribute name. */
         if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
             int host_list_num = 0;
             /* int value = crm_parse_int(attrvalue, "0"); */
-            const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
-            const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
+            const char *hosts = g_hash_table_lookup(params, "host_list");
+            const char *multiplier = g_hash_table_lookup(params, "multiplier");
 
             if (hosts) {
                 char **host_list = g_strsplit(hosts, " ", 0);
                 host_list_num = g_strv_length(host_list);
                 g_strfreev(host_list);
             }
 
             /* The default multiplier is 1, matching the ping agent's own
              * default, so expected_score = host count * multiplier. */
             *expected_score = host_list_num * crm_parse_int(multiplier, "1");
 
             return TRUE;
         }
     }
     return FALSE;
 }
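 
 /* Worked example (hypothetical configuration): a ping resource with
  * host_list="n1 n2 n3" and multiplier="100" yields an expected score of
  * 3 * 100 = 300; a lower pingd attribute value on the node indicates lost
  * or degraded connectivity.
  */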
 
 /* structure for passing multiple user data to g_list_foreach() */
 struct mon_attr_data {
     pcmk__output_t *out;
     pe_node_t *node;
+    pe_working_set_t *data_set;
 };
 
 static void
 print_node_attribute(gpointer name, gpointer user_data)
 {
     const char *value = NULL;
     int expected_score = 0;
     gboolean add_extra = FALSE;
     struct mon_attr_data *data = (struct mon_attr_data *) user_data;
 
     value = pe_node_attribute_raw(data->node, name);
 
     add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc,
-                               name, &expected_score);
+                               data->data_set, name, &expected_score);
 
     /* Print attribute name and value */
     data->out->message(data->out, "node-attribute", name, value, add_extra,
                        expected_score);
 }
 
 /*!
  * \internal
  * \brief Print history for all nodes.
  *
  * \param[in] out        The output functions structure.
  * \param[in] data_set   Cluster state to display.
  * \param[in] operations Whether to print operations or just failcounts.
  * \param[in] mon_ops    Bitmask of mon_op_*.
  */
 static int
 print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                    gboolean operations, unsigned int mon_ops, GListPtr only_node,
                    GListPtr only_rsc, gboolean print_spacer)
 {
     xmlNode *node_state = NULL;
     xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(cib_status) == 0) {
         return rc;
     }
 
     /* Print each node in the CIB status */
     for (node_state = pcmk__xe_first_child(cib_status); node_state != NULL;
          node_state = pcmk__xe_next(node_state)) {
         pe_node_t *node;
 
         if (!pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
             continue;
         }
 
         node = pe_find_node_id(data_set->nodes, ID(node_state));
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_node, node->details->uname)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, operations ? "Operations" : "Migration Summary");
 
         print_node_history(out, data_set, node, node_state, operations, mon_ops,
                            only_node, only_rsc);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print all tickets.
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  */
 static int
 print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                       gboolean print_spacer)
 {
     GHashTableIter iter;
     gpointer key, value;
 
     if (g_hash_table_size(data_set->tickets) == 0) {
         return pcmk_rc_no_output;
     }
 
     PCMK__OUTPUT_SPACER_IF(out, print_spacer);
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "Tickets");
 
     /* Print each ticket */
     g_hash_table_iter_init(&iter, data_set->tickets);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         pe_ticket_t *ticket = (pe_ticket_t *) value;
         out->message(out, "ticket", ticket);
     }
 
     /* Close section */
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Print section for negative location constraints
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  * \param[in] mon_ops  Bitmask of mon_op_*.
  * \param[in] prefix   ID prefix to filter results by.
  */
 static int
 print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                     unsigned int mon_ops, const char *prefix, GListPtr only_rsc,
                     gboolean print_spacer)
 {
     GListPtr gIter, gIter2;
     int rc = pcmk_rc_no_output;
 
     /* Print each ban */
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *location = gIter->data;
 
         if (prefix != NULL && !g_str_has_prefix(location->id, prefix))
             continue;
 
         if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) &&
             !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) {
             continue;
         }
 
         for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_node_t *node = (pe_node_t *) gIter2->data;
 
             if (node->weight < 0) {
                 PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
                 out->message(out, "ban", node, location,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail));
             }
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node attributes section
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] mon_ops      Bitmask of mon_op_*.
  * \param[in] only_node    List of node names to filter results by.
  * \param[in] only_rsc     List of resource names to filter results by.
  * \param[in] print_spacer Whether to display a spacer first.
  */
 static int
 print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                       unsigned int mon_ops, GListPtr only_node,
                       GListPtr only_rsc, gboolean print_spacer)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
 
-    /* Unpack all resource parameters (it would be more efficient to do this
-     * only when needed for the first time in add_extra_info())
-     */
-    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
-        crm_mon_get_parameters(gIter->data, data_set);
-    }
-
     /* Display each node's attributes */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         struct mon_attr_data data;
 
         data.out = out;
         data.node = (pe_node_t *) gIter->data;
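+        /* Carry the working set so resource parameters can be unpacked on
+         * demand while printing attributes (replacing the up-front pass
+         * removed above)
+         */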
+        data.data_set = data_set;
 
         if (data.node && data.node->details && data.node->details->online) {
             GList *attr_list = NULL;
             GHashTableIter iter;
             gpointer key, value;
 
             g_hash_table_iter_init(&iter, data.node->details->attrs);
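             /* append_attr_list() filters out automatic node attributes and
              * keeps the list sorted by name
              */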
             while (g_hash_table_iter_next(&iter, &key, &value)) {
                 attr_list = append_attr_list(attr_list, key);
             }
 
             if (attr_list == NULL) {
                 continue;
             }
 
             if (!pcmk__str_in_list(only_node, data.node->details->uname)) {
                 continue;
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
 
             out->message(out, "node", data.node, get_resource_display_options(mon_ops),
                          FALSE, NULL,
                          pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                          pcmk_is_set(mon_ops, mon_op_print_brief),
                          pcmk_is_set(mon_ops, mon_op_group_by_node),
                          only_node, only_rsc);
             g_list_foreach(attr_list, print_node_attribute, &data);
             g_list_free(attr_list);
             out->end_list(out);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print a section for failed actions
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] only_node    List of node names to filter results by.
  * \param[in] only_rsc     List of resource names to filter results by.
  * \param[in] print_spacer Whether to display a spacer first.
  */
 static int
 print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                      GListPtr only_node, GListPtr only_rsc, gboolean print_spacer)
 {
     xmlNode *xml_op = NULL;
     int rc = pcmk_rc_no_output;
 
     const char *id = NULL;
 
     if (xmlChildElementCount(data_set->failed) == 0) {
         return rc;
     }
 
     for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
         char *rsc = NULL;
 
         if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) {
             continue;
         }
 
         id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
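         /* Derive the resource name from the operation key so this entry can
          * be matched against the only_rsc filter
          */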
         if (parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL) == FALSE) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_rsc, rsc)) {
             free(rsc);
             continue;
         }
 
         free(rsc);
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
         out->message(out, "failed-action", xml_op);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 #define CHECK_RC(retcode, retval)   \
     if (retval == pcmk_rc_ok) {     \
         retcode = pcmk_rc_ok;       \
     }
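 /* CHECK_RC() latches rc to pcmk_rc_ok once any section has produced output,
  * so later sections know whether a spacer line is needed (the
  * "rc == pcmk_rc_ok" arguments below)
  */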
 
 /*!
  * \internal
  * \brief Top-level printing function for text/curses output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node name to filter results by.
  * \param[in] only_rsc        Resource name to filter results by.
  */
 void
 print_status(pcmk__output_t *out, pe_working_set_t *data_set,
              stonith_history_t *stonith_history, unsigned int mon_ops,
              unsigned int show, char *prefix, char *only_node, char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
 
     unsigned int print_opts = get_resource_display_options(mon_ops);
     int rc = pcmk_rc_no_output;
 
     CHECK_RC(rc, out->message(out, "cluster-summary", data_set,
                               pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                               pcmk_is_set(show, mon_show_stack),
                               pcmk_is_set(show, mon_show_dc),
                               pcmk_is_set(show, mon_show_times),
                               pcmk_is_set(show, mon_show_counts),
                               pcmk_is_set(show, mon_show_options)));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     if (pcmk_is_set(show, mon_show_nodes) && unames) {
         PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok);
         CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
                                   resources, print_opts,
                                   pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                                   pcmk_is_set(mon_ops, mon_op_print_brief),
                                   pcmk_is_set(mon_ops, mon_op_group_by_node)));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         CHECK_RC(rc, out->message(out, "resource-list", data_set, print_opts,
                                   pcmk_is_set(mon_ops, mon_op_group_by_node),
                                   pcmk_is_set(mon_ops, mon_op_inactive_resources),
                                   pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
                                   resources, rc == pcmk_rc_ok));
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, resources,
                                            rc == pcmk_rc_ok));
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         CHECK_RC(rc, print_node_summary(out, data_set,
                                         pcmk_is_set(show, mon_show_operations),
                                         mon_ops, unames, resources,
                                         (rc == pcmk_rc_ok)));
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         CHECK_RC(rc, print_failed_actions(out, data_set, unames, resources,
                                           rc == pcmk_rc_ok));
     }
 
     /* Print failed stonith actions */
     if (pcmk_is_set(show, mon_show_fence_failed)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
         stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
                                                               GINT_TO_POINTER(st_failed));
 
         if (hp) {
             CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames,
                                       pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                       rc == pcmk_rc_ok));
         }
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         CHECK_RC(rc, print_cluster_tickets(out, data_set, rc == pcmk_rc_ok));
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, resources,
                                          rc == pcmk_rc_ok));
     }
 
     /* Print stonith history */
     if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
         if (pcmk_is_set(show, mon_show_fence_worked)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
                                                                   GINT_TO_POINTER(st_failed));
 
             if (hp) {
                 CHECK_RC(rc, out->message(out, "fencing-list", hp, unames,
                                           pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                           rc == pcmk_rc_ok));
             }
         } else if (pcmk_is_set(show, mon_show_fence_pending)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
 
             if (hp) {
                 CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames,
                                           pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                           rc == pcmk_rc_ok));
             }
         }
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for XML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] history_rc      Result of fetching stonith history.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node name to filter results by.
  * \param[in] only_rsc        Resource name to filter results by.
  */
 void
 print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
                  crm_exit_t history_rc, stonith_history_t *stonith_history,
                  unsigned int mon_ops, unsigned int show, char *prefix,
                  char *only_node, char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
     out->message(out, "cluster-summary", data_set,
                  pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                  pcmk_is_set(show, mon_show_stack),
                  pcmk_is_set(show, mon_show_dc),
                  pcmk_is_set(show, mon_show_times),
                  pcmk_is_set(show, mon_show_counts),
                  pcmk_is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     /*** NODES ***/
     if (pcmk_is_set(show, mon_show_nodes)) {
         out->message(out, "node-list", data_set->nodes, unames,
                      resources, print_opts,
                      pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                      pcmk_is_set(mon_ops, mon_op_print_brief),
                      pcmk_is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         out->message(out, "resource-list", data_set, print_opts,
                      pcmk_is_set(mon_ops, mon_op_group_by_node),
                      pcmk_is_set(mon_ops, mon_op_inactive_resources),
                      FALSE, FALSE, unames, resources, FALSE);
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         print_node_summary(out, data_set,
                            pcmk_is_set(show, mon_show_operations),
                            mon_ops, unames, resources, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         print_failed_actions(out, data_set, unames, resources, FALSE);
     }
 
     /* Print stonith history */
     if (pcmk_is_set(show, mon_show_fencing_all)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
         out->message(out, "full-fencing-list", history_rc, stonith_history,
                      unames, pcmk_is_set(mon_ops, mon_op_fence_full_history),
                      FALSE);
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for HTML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node name to filter results by.
  * \param[in] only_rsc        Resource name to filter results by.
  */
 int
 print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
                   stonith_history_t *stonith_history, unsigned int mon_ops,
                   unsigned int show, char *prefix, char *only_node,
                   char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
 
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
     out->message(out, "cluster-summary", data_set,
                  pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                  pcmk_is_set(show, mon_show_stack),
                  pcmk_is_set(show, mon_show_dc),
                  pcmk_is_set(show, mon_show_times),
                  pcmk_is_set(show, mon_show_counts),
                  pcmk_is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     /*** NODE LIST ***/
     if (pcmk_is_set(show, mon_show_nodes) && unames) {
         out->message(out, "node-list", data_set->nodes, unames,
                      resources, print_opts,
                      pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                      pcmk_is_set(mon_ops, mon_op_print_brief),
                      pcmk_is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         out->message(out, "resource-list", data_set, print_opts,
                      pcmk_is_set(mon_ops, mon_op_group_by_node),
                      pcmk_is_set(mon_ops, mon_op_inactive_resources),
                      pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
                      resources, FALSE);
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         print_node_summary(out, data_set,
                            pcmk_is_set(show, mon_show_operations),
                            mon_ops, unames, resources, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         print_failed_actions(out, data_set, unames, resources, FALSE);
     }
 
     /* Print failed stonith actions */
     if (pcmk_is_set(show, mon_show_fence_failed)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
         stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
                                                               GINT_TO_POINTER(st_failed));
 
         if (hp) {
             out->message(out, "failed-fencing-list", stonith_history, unames,
                          pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE);
         }
     }
 
     /* Print stonith history */
     if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
         if (pcmk_is_set(show, mon_show_fence_worked)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
                                                                   GINT_TO_POINTER(st_failed));
 
             if (hp) {
                 out->message(out, "fencing-list", hp, unames,
                              pcmk_is_set(mon_ops, mon_op_fence_full_history),
                              FALSE);
             }
         } else if (pcmk_is_set(show, mon_show_fence_pending)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
 
             if (hp) {
                 out->message(out, "pending-fencing-list", hp, unames,
                              pcmk_is_set(mon_ops, mon_op_fence_full_history),
                              FALSE);
             }
         }
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
     return 0;
 }
diff --git a/tools/crm_mon_runtime.c b/tools/crm_mon_runtime.c
index ce31559a5d..43152ce915 100644
--- a/tools/crm_mon_runtime.c
+++ b/tools/crm_mon_runtime.c
@@ -1,106 +1,93 @@
 /*
- * Copyright 2019 the Pacemaker project contributors
+ * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <glib.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/curses_internal.h>
 #include <crm/pengine/common.h>
 #include <crm/pengine/internal.h>
 #include <crm/pengine/pe_types.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/internal.h>
 #include <crm/common/util.h>
 
 #include "crm_mon.h"
 
 void
 blank_screen(void)
 {
 #if CURSES_ENABLED
     int lpc = 0;
 
     for (lpc = 0; lpc < LINES; lpc++) {
         move(lpc, 0);
         clrtoeol();
     }
     move(0, 0);
     refresh();
 #endif
 }
 
 static int
 compare_attribute(gconstpointer a, gconstpointer b)
 {
     int rc;
 
     rc = strcmp((const char *)a, (const char *)b);
 
     return rc;
 }
 
 GList *
 append_attr_list(GList *attr_list, char *name)
 {
     int i;
     const char *filt_str[] = FILTER_STR;
 
     CRM_CHECK(name != NULL, return attr_list);
 
     /* filtering automatic attributes */
     for (i = 0; filt_str[i] != NULL; i++) {
         if (g_str_has_prefix(name, filt_str[i])) {
             return attr_list;
         }
     }
 
     return g_list_insert_sorted(attr_list, name, compare_attribute);
 }
 
-void
-crm_mon_get_parameters(pe_resource_t *rsc, pe_working_set_t * data_set)
-{
-    get_rsc_attributes(rsc->parameters, rsc, NULL, data_set);
-    if(rsc->children) {
-        GListPtr gIter = NULL;
-
-        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
-            crm_mon_get_parameters(gIter->data, data_set);
-        }
-    }
-}
-
 /*!
  * \internal
  * \brief Return resource display options corresponding to command-line choices
  *
  * \return Bitmask of pe_print_options suitable for resource print functions
  */
 unsigned int
 get_resource_display_options(unsigned int mon_ops)
 {
     int print_opts = 0;
 
     if (pcmk_is_set(mon_ops, mon_op_print_pending)) {
         print_opts |= pe_print_pending;
     }
     if (pcmk_is_set(mon_ops, mon_op_print_clone_detail)) {
         print_opts |= pe_print_clone_details|pe_print_implicit;
     }
     if (!pcmk_is_set(mon_ops, mon_op_inactive_resources)) {
         print_opts |= pe_print_clone_active;
     }
     if (pcmk_is_set(mon_ops, mon_op_print_brief)) {
         print_opts |= pe_print_brief;
     }
     return print_opts;
 }
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index b028c40c48..9663e68b18 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2045 +1,2049 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/lrmd_internal.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/lists_internal.h>
 #include <pacemaker-internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/cib/internal.h>
 
 #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
 
 enum rsc_command {
     cmd_none = 0,           // No command option given (yet)
     cmd_ban,
     cmd_cleanup,
     cmd_clear,
     cmd_colocations,
     cmd_colocations_deep,
     cmd_cts,
     cmd_delete,
     cmd_delete_param,
     cmd_execute_agent,
     cmd_fail,
     cmd_get_param,
     cmd_get_property,
     cmd_list_active_ops,
     cmd_list_agents,
     cmd_list_all_ops,
     cmd_list_alternatives,
     cmd_list_instances,
     cmd_list_providers,
     cmd_list_resources,
     cmd_list_standards,
     cmd_locate,
     cmd_metadata,
     cmd_move,
     cmd_query_raw_xml,
     cmd_query_xml,
     cmd_refresh,
     cmd_restart,
     cmd_set_param,
     cmd_set_property,
     cmd_wait,
     cmd_why,
 };
 
 struct {
     enum rsc_command rsc_cmd;   // The crm_resource command to perform
     const char *attr_set_type;
     int cib_options;
     gboolean clear_expired;
     int find_flags;             // Flags to use when searching for resource
     gboolean force;
     gchar *host_uname;
     gchar *interval_spec;
     gchar *move_lifetime;
     gchar *operation;
     GHashTable *override_params;
     gchar *prop_id;
     char *prop_name;
     gchar *prop_set;
     gchar *prop_value;
     gboolean recursive;
     gchar **remainder;
     gboolean require_cib;           // Whether command requires CIB connection
     gboolean require_crmd;          // Whether command requires controller connection
     gboolean require_dataset;       // Whether command requires populated dataset instance
     gboolean require_resource;      // Whether command requires that resource be specified
     int resource_verbose;
     gchar *rsc_id;
     gchar *rsc_type;
     gboolean promoted_role_only;
     int timeout_ms;
     char *agent_spec;               // Standard and/or provider and/or agent
     char *v_agent;
     char *v_class;
     char *v_provider;
     gboolean validate_cmdline;      /* whether we are just validating based on command line options */
     GHashTable *validate_options;
     gchar *xml_file;
 } options = {
     .attr_set_type = XML_TAG_ATTR_SETS,
     .cib_options = cib_sync_call,
     .require_cib = TRUE,
     .require_dataset = TRUE,
     .require_resource = TRUE,
 };
 
 #if 0
 // @COMPAT @TODO enable this at next backward compatibility break
 #define SET_COMMAND(cmd) do {                                               \
         if (options.rsc_cmd != cmd_none) {                                  \
             g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,             \
                         "Only one command option may be specified");        \
             return FALSE;                                                   \
         }                                                                   \
         options.rsc_cmd = (cmd);                                            \
     } while (0)
 #else
 #define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0)
 #endif
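
 /* Each command callback below records its command via SET_COMMAND(); with the
  * compatibility block above enabled, giving two command options would be
  * rejected instead of silently using the last one
  */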
 
 gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
                         gpointer data, GError **error);
 gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error);
 gboolean option_cb(const gchar *option_name, const gchar *optarg,
                    gpointer data, GError **error);
 gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean restart_cb(const gchar *option_name, const gchar *optarg,
                     gpointer data, GError **error);
 gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 
 static crm_exit_t exit_code = CRM_EX_OK;
 static pcmk__output_t *out = NULL;
 
 // Things that should be cleaned up on exit
 static GError *error = NULL;
 static GMainLoop *mainloop = NULL;
 static cib_t *cib_conn = NULL;
 static pcmk_ipc_api_t *controld_api = NULL;
 static pe_working_set_t *data_set = NULL;
 
 #define MESSAGE_TIMEOUT_S 60
 
 #define INDENT "                                    "
 
 static pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 // Clean up and exit
 static crm_exit_t
 bye(crm_exit_t ec)
 {
     if (error != NULL) {
         if (out != NULL) {
             out->err(out, "%s: %s", g_get_prgname(), error->message);
         } else {
             fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
         }
 
         g_clear_error(&error);
     }
 
     if (out != NULL) {
         out->finish(out, ec, true, NULL);
         pcmk__output_free(out);
     }
 
     if (cib_conn != NULL) {
         cib_t *save_cib_conn = cib_conn;
 
         cib_conn = NULL; // Ensure we can't free this twice
         save_cib_conn->cmds->signoff(save_cib_conn);
         cib_delete(save_cib_conn);
     }
     if (controld_api != NULL) {
         pcmk_ipc_api_t *save_controld_api = controld_api;
 
         controld_api = NULL; // Ensure we can't free this twice
         pcmk_free_ipc_api(save_controld_api);
     }
     if (mainloop != NULL) {
         g_main_loop_unref(mainloop);
         mainloop = NULL;
     }
     pe_free_working_set(data_set);
     data_set = NULL;
     crm_exit(ec);
     return ec;
 }
 
 static void
 quit_main_loop(crm_exit_t ec)
 {
     exit_code = ec;
     if (mainloop != NULL) {
         GMainLoop *mloop = mainloop;
 
         mainloop = NULL; // Don't re-enter this block
         pcmk_quit_main_loop(mloop, 10);
         g_main_loop_unref(mloop);
     }
 }
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
     // Discard any pending error so the timeout becomes the reported failure
     if (error != NULL) {
         g_clear_error(&error);
     }
 
     g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
                 "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S);
 
     quit_main_loop(CRM_EX_TIMEOUT);
     return FALSE;
 }
 
 static void
 controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
                           crm_exit_t status, void *event_data, void *user_data)
 {
     switch (event_type) {
         case pcmk_ipc_event_disconnect:
             if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
                 crm_info("Connection to controller was terminated");
             }
             quit_main_loop(exit_code);
             break;
 
         case pcmk_ipc_event_reply:
             if (status != CRM_EX_OK) {
                 out->err(out, "Error: bad reply from controller: %s",
                          crm_exit_str(status));
                 pcmk_disconnect_ipc(api);
                 quit_main_loop(status);
             } else {
                 if ((pcmk_controld_api_replies_expected(api) == 0)
                     && mainloop && g_main_loop_is_running(mainloop)) {
                     out->info(out, "... got reply (done)");
                     crm_debug("Got all the replies we expected");
                     pcmk_disconnect_ipc(api);
                     quit_main_loop(CRM_EX_OK);
                 } else {
                     out->info(out, "... got reply");
                 }
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 start_mainloop(pcmk_ipc_api_t *capi)
 {
     unsigned int count = pcmk_controld_api_replies_expected(capi);
 
     if (count > 0) {
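         /* Block in a main loop until every expected reply arrives, or until
          * resource_ipc_timeout() aborts the wait after MESSAGE_TIMEOUT_S
          * seconds
          */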
         out->info(out, "Waiting for %d %s from the controller",
                   count, pcmk__plural_alt(count, "reply", "replies"));
         exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
         mainloop = g_main_loop_new(NULL, FALSE);
         g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
         g_main_loop_run(mainloop);
     }
 }
 
 static int
 compare_id(gconstpointer a, gconstpointer b)
 {
     return strcmp((const char *)a, (const char *)b);
 }
 
 static GListPtr
 build_constraint_list(xmlNode *root)
 {
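     /* Collect the IDs of all rsc_location constraints, sorted by ID, so a
      * caller can compare the set before and after a clearing operation
      */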
     GListPtr retval = NULL;
     xmlNode *cib_constraints = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
     int ndx = 0;
 
     cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root);
     xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
 
     for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
         xmlNode *match = getXpathResult(xpathObj, ndx);
         retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
     }
 
     freeXpathObject(xpathObj);
     return retval;
 }
 
 /* short option letters still available: eEJkKXyYZ */
 
 static GOptionEntry query_entries[] = {
     { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all cluster resources with status",
       NULL },
     { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List IDs of all instantiated resources (individual members\n"
       INDENT "rather than groups etc.)",
       NULL },
     { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       NULL,
       NULL },
     { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List active resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_standards_cb,
       "List supported standards",
       NULL },
     { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_providers_cb,
       "List all available OCF providers",
       NULL },
     { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_agents_cb,
       "List all agents available for the named standard and/or provider",
       "STD/PROV" },
     { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_alternatives_cb,
       "List all available providers for the named OCF agent",
       "AGENT" },
     { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       metadata_cb,
       "Show the metadata for the named class:provider:agent",
       "SPEC" },
     { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (after any template expansion)",
       NULL },
     { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (before any template expansion)",
       NULL },
     { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named parameter for resource (use instance attribute\n"
       INDENT "unless --meta or --utilization is specified)",
       "PARAM" },
     { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named property of resource ('class', 'type', or 'provider') "
       "(requires --resource)",
       "PROPERTY" },
     { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show node(s) currently running resource",
       NULL },
     { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource\n"
       INDENT "and the resources is it colocated with",
       NULL },
     { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource",
       NULL },
     { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
       "Show why resources are not running, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry command_entries[] = {
     { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "Validate resource configuration by calling agent's validate-all\n"
       INDENT "action. The configuration may be specified either by giving an\n"
       INDENT "existing resource name with -r, or by specifying --class,\n"
       INDENT "--agent, and --provider arguments, along with any number of\n"
       INDENT "--option arguments.",
       NULL },
     { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "If resource has any past failures, clear its history and fail\n"
       INDENT "count. Optionally filtered by --resource, --node, --operation\n"
       INDENT "and --interval (otherwise all). --operation and --interval\n"
       INDENT "apply to fail counts, but entire history is always clear, to\n"
       INDENT "allow current state to be rechecked. If the named resource is\n"
       INDENT "part of a group, or one numbered instance of a clone or bundled\n"
       INDENT "resource, the clean-up applies to the whole collective resource\n"
       INDENT "unless --force is given.",
       NULL },
     { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "Delete resource's history (including failures) so its current state\n"
       INDENT "is rechecked. Optionally filtered by --resource and --node\n"
       INDENT "(otherwise all). If the named resource is part of a group, or one\n"
       INDENT "numbered instance of a clone or bundled resource, the refresh\n"
       INDENT "applies to the whole collective resource unless --force is given.",
       NULL },
     { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Set named parameter for resource (requires -v). Use instance\n"
       INDENT "attribute unless --meta or --utilization is specified.",
       "PARAM" },
     { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Delete named parameter for resource. Use instance attribute\n"
       INDENT "unless --meta or --utilization is specified.",
       "PARAM" },
     { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
       "Set named property of resource ('class', 'type', or 'provider') "
       "(requires -r, -t, -v)",
       "PROPERTY" },
 
     { NULL }
 };
 
 static GOptionEntry location_entries[] = {
     { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to move resource. If --node is specified,\n"
       INDENT "the constraint will be to move to that node, otherwise it\n"
       INDENT "will be to ban the current node. Unless --force is specified\n"
       INDENT "this will return an error if the resource is already running\n"
       INDENT "on the specified node. If --force is specified, this will\n"
       INDENT "always ban the current node.\n"
       INDENT "Optional: --lifetime, --master. NOTE: This may prevent the\n"
       INDENT "resource from running on its previous location until the\n"
       INDENT "implicit constraint expires or is removed with --clear.",
       NULL },
     { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to keep resource off a node.\n"
       INDENT "Optional: --node, --lifetime, --master.\n"
       INDENT "NOTE: This will prevent the resource from running on the\n"
       INDENT "affected node until the implicit constraint expires or is\n"
       INDENT "removed with --clear. If --node is not specified, it defaults\n"
       INDENT "to the node currently running the resource for primitives\n"
       INDENT "and groups, or the master for promotable clones with\n"
       INDENT "promoted-max=1 (all other situations result in an error as\n"
       INDENT "there is no sane default).",
       NULL },
     { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Remove all constraints created by the --ban and/or --move\n"
       INDENT "commands. Requires: --resource. Optional: --node, --master,\n"
       INDENT "--expired. If --node is not specified, all constraints created\n"
       INDENT "by --ban and --move will be removed for the named resource. If\n"
       INDENT "--node and --force are specified, any constraint created by\n"
       INDENT "--move will be cleared, even if it is not for the specified\n"
       INDENT "node. If --expired is specified, only those constraints whose\n"
       INDENT "lifetimes have expired will be removed.",
       NULL },
     { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
       "Modifies the --clear argument to remove constraints with\n"
       INDENT "expired lifetimes.",
       NULL },
     { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
       "Lifespan (as ISO 8601 duration) of created constraints (with\n"
       INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
       "TIMESPEC" },
     { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only,
       "Limit scope of command to Master role (with -B, -M, -U). For\n"
       INDENT "-B and -M the previous master may remain active in the Slave role.",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry advanced_entries[] = {
     { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
       "(Advanced) Delete a resource from the CIB. Required: -t",
       NULL },
     { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
       "(Advanced) Tell the cluster this resource has failed",
       NULL },
     { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
       "(Advanced) Tell the cluster to restart this resource and\n"
       INDENT "anything that depends on it",
       NULL },
     { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
       "(Advanced) Wait until the cluster settles into a stable state",
       NULL },
     { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and demote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and stop a resource on the local node",
       NULL },
     { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and start a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and promote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and check the state of a resource on\n"
       INDENT "the local node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry validate_entries[] = {
     { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
       "The standard the resource agent confirms to (for example, ocf).\n"
       INDENT "Use with --agent, --provider, --option, and --validate.",
       "CLASS" },
     { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The agent to use (for example, IPaddr). Use with --class,\n"
       INDENT "--provider, --option, and --validate.",
       "AGENT" },
     { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The vendor that supplies the resource agent (for example,\n"
       INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
       "PROVIDER" },
     { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
       "Specify a device configuration parameter as NAME=VALUE (may be\n"
       INDENT "specified multiple times). Use with --validate and without the\n"
       INDENT "-r option.",
       "PARAM" },
 
     { NULL }
 };
 
 static GOptionEntry addl_entries[] = {
     { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
       "Node name",
       "NAME" },
     { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
       "Follow colocation chains when using --set-parameter",
       NULL },
     { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
       "Resource XML element (primitive, group, etc.) (with -D)",
       "ELEMENT" },
     { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
       "Value to use with -p",
       "PARAM" },
     { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource meta-attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource utilization attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
       "Operation to clear instead of all (with -C -r)",
       "OPERATION" },
     { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
       "Interval of operation to clear (default 0) (with -C -r -n)",
       "N" },
     { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
       "(Advanced) XML ID of attributes element to use (with -p, -d)",
       "ID" },
     { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
       "(Advanced) XML ID of nvpair element to use (with -p, -d)",
       "ID" },
     { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
       "(Advanced) Abort if command does not finish in this time (with\n"
       INDENT "--restart, --wait, --force-*)",
       "N" },
     { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
       "If making CIB changes, do so regardless of quorum. See help for\n"
       INDENT "individual commands for additional behavior.",
       NULL },
     { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
       NULL,
       "FILE" },
     { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
       NULL,
       "HOST" },
 
     { NULL }
 };
 
 gboolean
 agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.validate_cmdline = TRUE;
     options.require_resource = FALSE;
 
     if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
         if (options.v_provider) {
             free(options.v_provider);
         }
         options.v_provider = strdup(optarg);
     } else {
         if (options.v_agent) {
             free(options.v_agent);
         }
         options.v_agent = strdup(optarg);
     }
 
     return TRUE;
 }
 
 gboolean
 attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
         options.attr_set_type = XML_TAG_META_SETS;
     } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
         options.attr_set_type = XML_TAG_UTILIZATION;
     }
 
     return TRUE;
 }
 
 gboolean
 class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) {
         if (!out->is_quiet(out)) {
             g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
                         "Standard %s does not support parameters\n", optarg);
         }
         return FALSE;
 
     } else {
         if (options.v_class != NULL) {
             free(options.v_class);
         }
 
         options.v_class = strdup(optarg);
     }
 
     options.validate_cmdline = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
         SET_COMMAND(cmd_cleanup);
     } else {
         SET_COMMAND(cmd_refresh);
     }
 
     options.require_resource = FALSE;
     if (getenv("CIB_file") == NULL) {
         options.require_crmd = TRUE;
     }
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_dataset = FALSE;
     SET_COMMAND(cmd_delete);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.clear_expired = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 static void
 get_agent_spec(const gchar *optarg)
 {
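     /* Agent-related listings work from the installed resource agents rather
      * than the cluster configuration, so no CIB connection, data set, or
      * resource name is needed
      */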
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     if (options.agent_spec != NULL) {
         free(options.agent_spec);
         options.agent_spec = NULL;
     }
     if (optarg != NULL) {
         options.agent_spec = strdup(optarg);
     }
 }
 
 gboolean
 list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                GError **error)
 {
     SET_COMMAND(cmd_list_agents);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_providers);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_standards);
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_list_alternatives);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
             GError **error)
 {
     SET_COMMAND(cmd_metadata);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
           GError **error)
 {
     char *name = NULL;
     char *value = NULL;
 
     if (pcmk_scan_nvpair(optarg, &name, &value) != 2) {
         return FALSE;
     }
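     /* For example, --option ip=192.168.1.1 yields name "ip" and value
      * "192.168.1.1"; the table takes ownership of both strings
      */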
     if (options.validate_options == NULL) {
         options.validate_options = crm_str_table_new();
     }
     g_hash_table_replace(options.validate_options, name, value);
     return TRUE;
 }
 
 gboolean
 fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_crmd = TRUE;
     SET_COMMAND(cmd_fail);
     return TRUE;
 }
 
 gboolean
 flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_clear);
     } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_ban);
     } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_move);
     } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_any;
         SET_COMMAND(cmd_query_xml);
     } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_any;
         SET_COMMAND(cmd_query_raw_xml);
     } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_locate);
     } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_colocations_deep);
     } else {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_colocations);
     }
 
     return TRUE;
 }
 
 gboolean
 get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
         SET_COMMAND(cmd_get_param);
     } else {
         SET_COMMAND(cmd_get_property);
     }
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
         SET_COMMAND(cmd_cts);
     } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
         SET_COMMAND(cmd_list_resources);
     } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
         SET_COMMAND(cmd_list_instances);
     } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
         SET_COMMAND(cmd_list_active_ops);
     } else {
         SET_COMMAND(cmd_list_all_ops);
     }
 
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
         SET_COMMAND(cmd_set_param);
     } else {
         SET_COMMAND(cmd_delete_param);
     }
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_dataset = FALSE;
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     SET_COMMAND(cmd_set_property);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.timeout_ms = crm_get_msec(optarg);
     return TRUE;
 }
 
 gboolean
 validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_execute_agent);
     if (options.operation) {
         g_free(options.operation);
     }
     options.operation = g_strdup(option_name + 2); // skip "--"
     options.find_flags = pe_find_renamed|pe_find_anon;
     options.override_params = crm_str_table_new();
     return TRUE;
 }
 
 gboolean
 restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
            GError **error)
 {
     SET_COMMAND(cmd_restart);
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_wait);
     options.require_resource = FALSE;
     options.require_dataset = FALSE;
     return TRUE;
 }
 
 gboolean
 why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_resource = FALSE;
     SET_COMMAND(cmd_why);
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 static int
 ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime,
             crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     pe_node_t *current = NULL;
     unsigned int nactive = 0;
 
     CRM_CHECK(rsc != NULL, return EINVAL);
 
     current = pe__find_active_requires(rsc, &nactive);
 
     if (nactive == 1) {
         rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                               cib_conn, options.cib_options, options.promoted_role_only);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int count = 0;
         GListPtr iter = NULL;
 
         current = NULL;
         for (iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if (child_role == RSC_ROLE_MASTER) {
                 count++;
                 current = pe__current_node(child);
             }
         }
 
         if (count == 1 && current) {
             rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                                   cib_conn, options.cib_options, options.promoted_role_only);
 
         } else {
             rc = EINVAL;
             *exit_code = CRM_EX_USAGE;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "Resource '%s' not moved: active in %d locations (promoted in %d).\n"
                         "To prevent '%s' from running on a specific location, "
                         "specify a node."
                         "To prevent '%s' from being promoted at a specific "
                         "location, specify a node and the master option.",
                         options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
         }
 
     } else {
         rc = EINVAL;
         *exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                     "Resource '%s' not moved: active in %d locations.\n"
                     "To prevent '%s' from running on a specific location, "
                     "specify a node.",
                     options.rsc_id, nactive, options.rsc_id);
     }
 
     return rc;
 }
 
 static void
 cleanup(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
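         // Clean up the whole resource (its topmost parent), not just one member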
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Erasing failures of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation,
                              options.interval_spec, TRUE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
 {
     GListPtr before = NULL;
     GListPtr after = NULL;
     GListPtr remaining = NULL;
     GListPtr ele = NULL;
     pe_node_t *dest = NULL;
     int rc = pcmk_rc_ok;
 
     if (!out->is_quiet(out)) {
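         // Snapshot constraint IDs so the removed ones can be reported afterward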
         before = build_constraint_list(data_set->input);
     }
 
     if (options.clear_expired) {
         rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
                                             options.rsc_id, options.host_uname,
                                             options.promoted_role_only);
 
     } else if (options.host_uname) {
         dest = pe_find_node(data_set->nodes, options.host_uname);
         if (dest == NULL) {
             rc = pcmk_rc_node_unknown;
             if (!out->is_quiet(out)) {
                 g_list_free(before);
             }
             return rc;
         }
         rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
                                 cib_conn, options.cib_options, TRUE, options.force);
 
     } else {
         rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
                                 cib_conn, options.cib_options, TRUE, options.force);
     }
 
     if (!out->is_quiet(out)) {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Could not get modified CIB: %s\n", pcmk_strerror(rc));
             g_list_free(before);
             return rc;
         }
 
         data_set->input = *cib_xml_copy;
         cluster_status(data_set);
 
         after = build_constraint_list(data_set->input);
         remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
 
         for (ele = remaining; ele != NULL; ele = ele->next) {
             out->info(out, "Removing constraint: %s", (char *) ele->data);
         }
 
         g_list_free(before);
         g_list_free(after);
         g_list_free(remaining);
     }
 
     return rc;
 }
 
 static int
 delete()
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (options.rsc_type == NULL) {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "You need to specify a resource type with -t");
         return rc;
     }
 
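     /* Build an XML element whose tag is the resource type (e.g. "primitive")
      * and whose ID is the resource ID, so the CIB can match and remove the
      * corresponding entry in the resources section
      */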
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
 
     rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
     return rc;
 }
 
 static int
 list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     char *provider = strchr(agent_spec, ':');
     lrmd_t *lrmd_conn = lrmd_api_new();
     lrmd_list_t *list = NULL;
 
     if (provider) {
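         // Split "standard:provider" (e.g. "ocf:heartbeat") in place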
         *provider++ = 0;
     }
 
     rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
 
     if (rc > 0) {
         rc = out->message(out, "agents-list", list, agent_spec, provider);
     } else {
         rc = pcmk_rc_error;
     }
 
     if (rc != pcmk_rc_ok) {
         *exit_code = CRM_EX_NOSUCH;
         if (provider == NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No agents found for standard '%s'", agent_spec);
         } else {
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No agents found for standard '%s' and provider '%s'",
                         agent_spec, provider);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc;
     const char *text = NULL;
     lrmd_t *lrmd_conn = lrmd_api_new();
     lrmd_list_t *list = NULL;
 
     switch (options.rsc_cmd) {
         case cmd_list_alternatives:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "alternatives-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         case cmd_list_standards:
             rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "standards-list", list);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "standards";
             break;
         case cmd_list_providers:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "providers-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         default:
             *exit_code = CRM_EX_SOFTWARE;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "Bug: unhandled command in list_providers()");
             lrmd_api_delete(lrmd_conn);
             return pcmk_rc_error;
     }
 
     if (rc != pcmk_rc_ok) {
         if (agent_spec != NULL) {
             *exit_code = CRM_EX_NOSUCH;
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No %s found for %s", text, agent_spec);
 
         } else {
             *exit_code = CRM_EX_NOSUCH;
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No %s found", text);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 populate_working_set(xmlNodePtr *cib_xml_copy)
 {
     int rc = pcmk_rc_ok;
 
     if (options.xml_file != NULL) {
         *cib_xml_copy = filename2xml(options.xml_file);
     } else {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
     }
 
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     /* Populate the working set instance */
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         rc = ENOMEM;
         return rc;
     }
 
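     /* Per the flag names, skip resource-instance counting and deprecated
      * backward-compatibility handling, neither of which this tool needs
      */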
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     rc = update_working_set_xml(data_set, cib_xml_copy);
     if (rc == pcmk_rc_ok) {
         cluster_status(data_set);
     }
     return rc;
 }
 
 static int
 refresh(pcmk__output_t *out)
 {
     int rc = pcmk_rc_ok;
     const char *router_node = options.host_uname;
     int attr_options = pcmk__node_attr_none;
 
     if (options.host_uname) {
         pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
 
         if (pe__is_guest_or_remote_node(node)) {
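             /* Pacemaker Remote nodes have no controller of their own, so
              * route the request through the cluster node currently hosting
              * the remote connection resource
              */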
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 rc = ENXIO;
                 g_set_error(&error, PCMK__RC_ERROR, rc,
                             "No cluster connection to Pacemaker Remote node %s detected",
                             options.host_uname);
                 return rc;
             }
             router_node = node->details->uname;
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   options.host_uname? options.host_uname : "all nodes");
         rc = pcmk_rc_ok;
         return rc;
     }
 
     crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
 
     rc = pcmk__node_attr_request_clear(NULL, options.host_uname,
                                        NULL, NULL, NULL,
                                        NULL, attr_options);
 
     if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
                                   router_node) == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 
     return rc;
 }
 
 static void
 refresh_resource(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Re-checking the state of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL,
                              0, FALSE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 set_property()
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (pcmk__str_empty(options.rsc_type)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must specify -t with resource type");
         rc = ENXIO;
         return rc;
 
     } else if (pcmk__str_empty(options.prop_value)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must supply -v with new value");
         rc = EINVAL;
         return rc;
     }
 
     CRM_LOG_ASSERT(options.prop_name != NULL);
 
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
     crm_xml_add(msg_data, options.prop_name, options.prop_value);
 
     rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
 
     return rc;
 }
 
 static int
 show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     char *standard = NULL;
     char *provider = NULL;
     char *type = NULL;
     char *metadata = NULL;
     lrmd_t *lrmd_conn = lrmd_api_new();
 
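     /* A metadata query needs a fully qualified agent spec, e.g.
      * "ocf:heartbeat:Dummy" (or "standard:type" for standards without
      * providers); crm_parse_agent_spec() splits it into its parts
      */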
     rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
     rc = pcmk_legacy2rc(rc);
 
     if (rc == pcmk_rc_ok) {
         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                            provider, type,
                                            &metadata, 0);
         rc = pcmk_legacy2rc(rc);
 
         if (metadata) {
             out->output_xml(out, "metadata", metadata);
         } else {
             *exit_code = crm_errno2exit(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "Metadata query for %s failed: %s",
                         agent_spec, pcmk_rc_str(rc));
         }
     } else {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "'%s' is not a valid agent specification", agent_spec);
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static void
 validate_cmdline(crm_exit_t *exit_code)
 {
     // -r cannot be used with any of --class, --agent, or --provider
     if (options.rsc_id != NULL) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--resource cannot be used with --class, --agent, and --provider");
 
     // If --class, --agent, or --provider are given, --validate must also be given.
     } else if (options.rsc_cmd != cmd_execute_agent) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--class, --agent, and --provider require --validate");
 
     // Not all of --class, --agent, and --provider need to be given.  Not all
     // classes support the concept of a provider.  Check that what we were given
     // is valid.
     } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
         if (options.v_provider != NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "stonith does not support providers");
 
         } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "%s is not a known stonith agent", options.v_agent ? options.v_agent : "");
         }
 
     } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "%s:%s:%s is not a known resource",
                     options.v_class ? options.v_class : "",
                     options.v_provider ? options.v_provider : "",
                     options.v_agent ? options.v_agent : "");
     }
 
     if (error == NULL) {
         if (options.validate_options == NULL) {
             options.validate_options = crm_str_table_new();
         }
         *exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent,
                                                       "validate-all", options.validate_options,
                                                       options.override_params, options.timeout_ms,
                                                       options.resource_verbose, options.force);
     }
 }
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
         { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
           "Resource ID",
           "ID" },
         { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
           NULL,
           NULL },
 
         { NULL }
     };
 
     const char *description = "Examples:\n\n"
                               "List the available OCF agents:\n\n"
                               "\t# crm_resource --list-agents ocf\n\n"
                               "List the available OCF agents from the linux-ha project:\n\n"
                               "\t# crm_resource --list-agents ocf:heartbeat\n\n"
                               "Move 'myResource' to a specific node:\n\n"
                               "\t# crm_resource --resource myResource --move --node altNode\n\n"
                               "Allow (but not force) 'myResource' to move back to its original "
                               "location:\n\n"
                               "\t# crm_resource --resource myResource --clear\n\n"
                               "Stop 'myResource' (and anything that depends on it):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter target-role "
                               "--meta --parameter-value Stopped\n\n"
                               "Tell the cluster not to manage 'myResource' (the cluster will not "
                               "attempt to start or stop the\n"
                               "resource under any circumstances; useful when performing maintenance "
                               "tasks on a resource):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter is-managed "
                               "--meta --parameter-value false\n\n"
                               "Erase the operation history of 'myResource' on 'aNode' (the cluster "
                               "will 'forget' the existing\n"
                               "resource state, including any errors, and attempt to recover the"
                               "resource; useful when a resource\n"
                               "had failed permanently and has been repaired by an administrator):\n\n"
                               "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     /* Add the -Q option, which cannot be part of the globally supported options
      * because some tools use that flag for something else.
      */
     pcmk__add_main_args(context, extra_prog_entries);
 
     pcmk__add_arg_group(context, "queries", "Queries:",
                         "Show query help", query_entries);
     pcmk__add_arg_group(context, "commands", "Commands:",
                         "Show command help", command_entries);
     pcmk__add_arg_group(context, "locations", "Locations:",
                         "Show location help", location_entries);
     pcmk__add_arg_group(context, "validate", "Validate:",
                         "Show validate help", validate_entries);
     pcmk__add_arg_group(context, "advanced", "Advanced:",
                         "Show advanced option help", advanced_entries);
     pcmk__add_arg_group(context, "additional", "Additional Options:",
                         "Show additional options", addl_entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     xmlNode *cib_xml_copy = NULL;
     pe_resource_t *rsc = NULL;
 
     int rc = pcmk_rc_ok;
 
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     GOptionContext *context = NULL;
     GOptionGroup *output_group = NULL;
     gchar **processed_args = NULL;
 
     context = build_arg_context(args, &output_group);
     pcmk__register_formats(output_group, formats);
     crm_log_cli_init("crm_resource");
 
     processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv");
 
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     for (int i = 0; i < args->verbosity; i++) {
         crm_bump_log_level(argc, argv);
     }
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         fprintf(stderr, "Error creating output format %s: %s\n",
                 args->output_ty, pcmk_rc_str(rc));
         exit_code = CRM_EX_ERROR;
         goto done;
     }
 
     options.resource_verbose = args->verbosity;
     out->quiet = args->quiet;
 
     crm_log_args(argc, argv);
 
     if (options.host_uname) {
         crm_trace("Option host => %s", options.host_uname);
     }
 
     // If the user didn't explicitly specify a command, list resources
     if (options.rsc_cmd == cmd_none) {
         options.rsc_cmd = cmd_list_resources;
         options.require_resource = FALSE;
     }
 
     // --expired without --clear/-U doesn't make sense
     if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--expired requires --clear or -U");
         goto done;
     }
 
     if ((options.remainder != NULL) && (options.override_params != NULL)) {
         // Commands that use positional arguments will create override_params
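         // (e.g. a trailing "timeout=20s" on a --force-start run lands here)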
         for (gchar **s = options.remainder; *s; s++) {
             char *name = calloc(1, strlen(*s) + 1);
             char *value = calloc(1, strlen(*s) + 1);
             int rc = sscanf(*s, "%[^=]=%s", name, value);
 
             if (rc == 2) {
                 g_hash_table_replace(options.override_params, name, value);
 
             } else {
                 g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                             "Error parsing '%s' as a name=value pair",
                             *s);
                 free(value);
                 free(name);
                 goto done;
             }
         }
 
     } else if (options.remainder != NULL) {
         gchar **strv = NULL;
         gchar *msg = NULL;
         int i = 1;
         int len = 0;
 
         for (gchar **s = options.remainder; *s; s++) {
             len++;
         }
 
         CRM_ASSERT(len > 0);
 
         /* Allocate len + 2 entries: one for the header below, len for the
          * arguments, and a trailing NULL so g_strjoinv() knows where to stop
          */
         strv = calloc(len + 2, sizeof(char *));
         strv[0] = strdup("non-option ARGV-elements:");
 
         for (gchar **s = options.remainder; *s; s++) {
             strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
             i++;
         }
 
         msg = g_strjoinv("", strv);
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s", msg);
         g_free(msg);
 
         for (i = 0; i < len + 1; i++) {
             free(strv[i]);
         }
         free(strv);
 
         goto done;
     }
 
     if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
         /* A bit of a hack: display XML lists using a real tag instead of
          * <list>, which saves writing custom messages just to build the
          * lists around all these commands.
          */
         switch (options.rsc_cmd) {
             case cmd_list_resources:
             case cmd_query_xml:
             case cmd_query_raw_xml:
             case cmd_list_active_ops:
             case cmd_list_all_ops:
             case cmd_colocations:
             case cmd_colocations_deep:
                 pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
                 break;
 
             default:
                 pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
                 break;
         }
     } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
         if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep ||
             options.rsc_cmd == cmd_list_resources) {
             pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
         }
     }
 
     pe__register_messages(out);
     crm_resource_register_messages(out);
     lrmd__register_messages(out);
     pcmk__register_lib_messages(out);
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if (optind > argc) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Invalid option(s) supplied, use --help for valid usage");
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     // Sanity-check the parameters for command-line validation.  If everything
     // checks out, run the validation directly; this way we don't need a CIB
     // connection.
     if (options.validate_cmdline) {
         validate_cmdline(&exit_code);
         goto done;
     } else if (options.validate_options != NULL) {
         // @COMPAT @TODO error out here when we can break backward compatibility
         g_hash_table_destroy(options.validate_options);
         options.validate_options = NULL;
     }
 
     if (error != NULL) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     if (options.force) {
         crm_debug("Forcing...");
         cib__set_call_options(options.cib_options, crm_system_name,
                               cib_quorum_override);
     }
 
     if (options.require_resource && !options.rsc_id) {
         rc = ENXIO;
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must supply a resource id with -r");
         goto done;
     }
 
     if (options.find_flags && options.rsc_id) {
         options.require_dataset = TRUE;
     }
 
     // Establish a connection to the CIB if needed
     if (options.require_cib) {
         cib_conn = cib_new();
         if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT,
                         "Could not create CIB connection");
             goto done;
         }
         rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Could not connect to the CIB: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /* Populate working set from XML file if specified or CIB query otherwise */
     if (options.require_dataset) {
         rc = populate_working_set(&cib_xml_copy);
         if (rc != pcmk_rc_ok) {
             goto done;
         }
     }
 
     // If command requires that resource exist if specified, find it
     if (options.find_flags && options.rsc_id) {
         rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
                                           options.find_flags);
         if (rsc == NULL) {
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Resource '%s' not found", options.rsc_id);
             goto done;
         }
     }
 
     // Establish a connection to the controller if needed
     if (options.require_crmd) {
         rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
         pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                    NULL);
         rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     switch (options.rsc_cmd) {
         case cmd_list_resources: {
             GListPtr all = NULL;
             all = g_list_prepend(all, strdup("*"));
             rc = out->message(out, "resource-list", data_set,
                               pe_print_rsconly | pe_print_pending,
                               FALSE, TRUE, FALSE, TRUE, all, all, FALSE);
             g_list_free_full(all, free);
 
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
             break;
         }
 
         case cmd_list_instances:
             rc = out->message(out, "resource-names-list", data_set->resources);
 
             if (rc != pcmk_rc_ok) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_list_standards:
         case cmd_list_providers:
         case cmd_list_alternatives:
             rc = list_providers(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_list_agents:
             rc = list_agents(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_metadata:
             rc = show_metadata(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_restart:
             /* We don't pass data_set because rsc needs to stay valid for the
              * entire lifetime of cli_resource_restart(), but it will reset and
              * update the working set multiple times, so it needs to use its own
              * copy.
              */
             rc = cli_resource_restart(out, rsc, options.host_uname,
                                       options.move_lifetime, options.timeout_ms,
                                       cib_conn, options.cib_options,
                                       options.promoted_role_only,
                                       options.force);
             break;
 
         case cmd_wait:
             rc = wait_till_stable(out, options.timeout_ms, cib_conn);
             break;
 
         case cmd_execute_agent:
             exit_code = cli_resource_execute(out, rsc, options.rsc_id,
                                              options.operation,
                                              options.override_params,
                                              options.timeout_ms, cib_conn,
                                              data_set, options.resource_verbose,
                                              options.force);
             break;
 
         case cmd_colocations:
             rc = out->message(out, "stacks-constraints", rsc, data_set, false);
             break;
 
         case cmd_colocations_deep:
             rc = out->message(out, "stacks-constraints", rsc, data_set, true);
             break;
 
         case cmd_cts:
             rc = pcmk_rc_ok;
 
             for (GList *lpc = data_set->resources; lpc != NULL;
                  lpc = lpc->next) {
 
                 rsc = (pe_resource_t *) lpc->data;
                 cli_resource_print_cts(out, rsc);
             }
 
             cli_resource_print_cts_constraints(out, data_set);
             break;
 
         case cmd_fail:
             rc = cli_resource_fail(out, controld_api, options.host_uname,
                                    options.rsc_id, data_set);
             if (rc == pcmk_rc_ok) {
                 start_mainloop(controld_api);
             }
             break;
 
         case cmd_list_active_ops:
             rc = cli_resource_print_operations(out, options.rsc_id,
                                                options.host_uname, TRUE,
                                                data_set);
             break;
 
         case cmd_list_all_ops:
             rc = cli_resource_print_operations(out, options.rsc_id,
                                                options.host_uname, FALSE,
                                                data_set);
             break;
 
         case cmd_locate: {
             GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set);
             rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id);
             break;
         }
 
         case cmd_query_xml:
             rc = cli_resource_print(out, rsc, data_set, TRUE);
             break;
 
         case cmd_query_raw_xml:
             rc = cli_resource_print(out, rsc, data_set, FALSE);
             break;
 
         case cmd_why:
             {
                 pe_node_t *dest = NULL;
 
                 if (options.host_uname) {
                     dest = pe_find_node(data_set->nodes, options.host_uname);
                     if (dest == NULL) {
                         rc = pcmk_rc_node_unknown;
                         goto done;
                     }
                 }
                 out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, dest);
                 rc = pcmk_rc_ok;
             }
             break;
 
         case cmd_clear:
             rc = clear_constraints(out, &cib_xml_copy);
             break;
 
         case cmd_move:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
             } else {
                 rc = cli_resource_move(out, rsc, options.rsc_id, options.host_uname,
                                        options.move_lifetime, cib_conn,
                                        options.cib_options, data_set,
                                        options.promoted_role_only,
                                        options.force);
             }
             break;
 
         case cmd_ban:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
             } else {
                 pe_node_t *dest = pe_find_node(data_set->nodes,
                                                options.host_uname);
 
                 if (dest == NULL) {
                     rc = pcmk_rc_node_unknown;
                     goto done;
                 }
                 rc = cli_resource_ban(out, options.rsc_id, dest->details->uname,
                                       options.move_lifetime, NULL, cib_conn,
                                       options.cib_options,
                                       options.promoted_role_only);
             }
             break;
 
         case cmd_get_property:
             rc = out->message(out, "property-list", rsc, options.prop_name);
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_set_property:
             rc = set_property();
             break;
 
         case cmd_get_param: {
             unsigned int count = 0;
             GHashTable *params = NULL;
             pe_node_t *current = pe__find_active_on(rsc, &count, NULL);
+            bool free_params = true;
 
             if (count > 1) {
                 out->err(out, "%s is active on more than one node,"
                          " returning the default value for %s", rsc->id, crm_str(options.prop_name));
                 current = NULL;
             }
 
-            params = crm_str_table_new();
+            crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
 
             if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
-                get_rsc_attributes(params, rsc, current, data_set);
+                params = pe_rsc_params(rsc, current, data_set);
+                free_params = false;
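+                /* pe_rsc_params() returns a table cached on the resource
+                 * itself, so it must not be destroyed here
+                 */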
 
             } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
-                /* No need to redirect to the parent */
+                params = crm_str_table_new();
                 get_meta_attributes(params, rsc, current, data_set);
 
             } else {
+                params = crm_str_table_new();
                 pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
                                            NULL, FALSE, data_set);
             }
 
-            crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
             rc = out->message(out, "attribute-list", rsc, options.prop_name, params);
-            g_hash_table_destroy(params);
+            if (free_params) {
+                g_hash_table_destroy(params);
+            }
             break;
         }
 
         case cmd_set_param:
             if (pcmk__str_empty(options.prop_value)) {
                 g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                             "You need to supply a value with the -v option");
                 rc = EINVAL;
                 goto done;
             }
 
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_update_attribute(out, rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name,
                                                options.prop_value,
                                                options.recursive, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_delete_param:
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_delete_attribute(out, rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_cleanup:
             if (rsc == NULL) {
                 rc = cli_cleanup_all(out, controld_api, options.host_uname,
                                      options.operation, options.interval_spec,
                                      data_set);
                 if (rc == pcmk_rc_ok) {
                     start_mainloop(controld_api);
                 }
             } else {
                 cleanup(out, rsc);
             }
             break;
 
         case cmd_refresh:
             if (rsc == NULL) {
                 rc = refresh(out);
             } else {
                 refresh_resource(out, rsc);
             }
             break;
 
         case cmd_delete:
             rc = delete();
             break;
 
         default:
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_SOFTWARE,
                         "Unimplemented command: %d", (int) options.rsc_cmd);
             break;
     }
 
 done:
     if (rc != pcmk_rc_ok) {
         if (rc == pcmk_rc_no_quorum) {
             g_prefix_error(&error, "To ignore quorum, use the force option.\n");
         }
 
         if (error != NULL) {
             char *msg = crm_strdup_printf("%s\nError performing operation: %s",
                                           error->message, pcmk_rc_str(rc));
             g_clear_error(&error);
             g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg);
             free(msg);
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error performing operation: %s", pcmk_rc_str(rc));
         }
 
         if (exit_code == CRM_EX_OK) {
             exit_code = pcmk_rc2exitc(rc);
         }
     }
 
     g_free(options.host_uname);
     g_free(options.interval_spec);
     g_free(options.move_lifetime);
     g_free(options.operation);
     g_free(options.prop_id);
     free(options.prop_name);
     g_free(options.prop_set);
     g_free(options.prop_value);
     g_free(options.rsc_id);
     g_free(options.rsc_type);
     free(options.agent_spec);
     free(options.v_agent);
     free(options.v_class);
     free(options.v_provider);
     g_free(options.xml_file);
     g_strfreev(options.remainder);
 
     if (options.override_params != NULL) {
         g_hash_table_destroy(options.override_params);
     }
 
     /* options.validate_options does not need to be destroyed here.  See the
      * comments in cli_resource_execute_from_params.
      */
 
     g_strfreev(processed_args);
     g_option_context_free(context);
 
     return bye(exit_code);
 }
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index 89d6172dd3..398fef0b7e 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -1,574 +1,580 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/output_internal.h>
 
 #define cons_string(x) ((x)? (x) : "NA")
 
 void
 cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
     xmlNode *lifetime = NULL;
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     for (xml_obj = pcmk__xe_first_child(cib_constraints); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
         const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
 
         if (id == NULL) {
             continue;
         }
 
         // @COMPAT lifetime is deprecated
         lifetime = first_named_child(xml_obj, "lifetime");
         if (pe_evaluate_rules(lifetime, NULL, data_set->now, NULL) == FALSE) {
             continue;
         }
 
         if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) {
             continue;
         }
 
         out->info(out, "Constraint %s %s %s %s %s %s %s",
                   crm_element_name(xml_obj),
                   cons_string(crm_element_value(xml_obj, XML_ATTR_ID)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)),
                   cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE)));
     }
 }
 
 void
 cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc)
 {
     GListPtr lpc = NULL;
     const char *host = NULL;
     bool needs_quorum = TRUE;
     const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     pe_node_t *node = pe__current_node(rsc);
 
     if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         needs_quorum = FALSE;
     } else {
         // @TODO check requires in resource meta-data and rsc_defaults
     }
 
     if (node != NULL) {
         host = node->details->uname;
     }
 
     out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx",
               crm_element_name(rsc->xml), rsc->id,
               rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA",
               rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags,
               rsc->flags);
 
     for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *child = (pe_resource_t *) lpc->data;
 
         cli_resource_print_cts(out, child);
     }
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id,
                               const char *host_uname, bool active,
                               pe_working_set_t * data_set)
 {
     int rc = pcmk_rc_no_output;
     GListPtr ops = find_operations(rsc_id, host_uname, active, data_set);
 
     if (!ops) {
         return rc;
     }
 
     out->begin_list(out, NULL, NULL, "Resource Operations");
     rc = pcmk_rc_ok;
 
     for (GListPtr lpc = ops; lpc != NULL; lpc = lpc->next) {
         xmlNode *xml_op = (xmlNode *) lpc->data;
         out->message(out, "node-and-op", data_set, xml_op);
     }
 
     out->end_list(out);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc,
                    pe_working_set_t *data_set, bool expanded)
 {
     unsigned int opts = pe_print_pending;
     GListPtr all = NULL;
 
     all = g_list_prepend(all, strdup("*"));
 
     out->begin_list(out, NULL, NULL, "Resource Config");
     out->message(out, crm_map_element_name(rsc->xml), opts, rsc, all, all);
     out->message(out, "resource-config", rsc, !expanded);
     out->end_list(out);
 
     g_list_free_full(all, free);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *")
 static int
 attribute_list_default(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
     GHashTable *params = va_arg(args, GHashTable *);
 
-    const char *value = g_hash_table_lookup(params, attr);
+    const char *value = NULL;
 
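+    // A NULL params table is treated the same as a missing attribute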
+    if (params != NULL) {
+        value = g_hash_table_lookup(params, attr);
+    }
     if (value != NULL) {
         out->begin_list(out, NULL, NULL, "Attributes");
         out->list_item(out, attr, "%s", value);
         out->end_list(out);
     } else {
         out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *")
 static int
 attribute_list_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
     GHashTable *params = va_arg(args, GHashTable *);
 
-    const char *value = g_hash_table_lookup(params, attr);
+    const char *value = NULL;
 
+    if (params != NULL) {
+        value = g_hash_table_lookup(params, attr);
+    }
     if (value != NULL) {
         out->info(out, "%s", value);
     } else {
         out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *")
 static int
 property_list_default(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
 
     const char *value = crm_element_value(rsc->xml, attr);
 
     if (value != NULL) {
         out->begin_list(out, NULL, NULL, "Properties");
         out->list_item(out, attr, "%s", value);
         out->end_list(out);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *")
 static int
 property_list_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
 
     const char *value = crm_element_value(rsc->xml, attr);
 
     if (value != NULL) {
         out->info(out, "%s", value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
 static int
 resource_check_list_default(pcmk__output_t *out, va_list args) {
     resource_checks_t *checks = va_arg(args, resource_checks_t *);
 
     pe_resource_t *parent = uber_parent(checks->rsc);
     int rc = pcmk_rc_no_output;
     bool printed = false;
 
     if (checks->flags != 0 || checks->lock_node != NULL) {
         printed = true;
         out->begin_list(out, NULL, NULL, "Resource Checks");
     }
 
     if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
         out->list_item(out, "check", "Configuration specifies '%s' should remain stopped",
                        parent->id);
     }
 
     if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
         out->list_item(out, "check", "Configuration specifies '%s' should not be promoted",
                        parent->id);
     }
 
     if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
         out->list_item(out, "check", "Configuration prevents cluster from stopping or starting unmanaged '%s'",
                        parent->id);
     }
 
     if (checks->lock_node) {
         out->list_item(out, "check", "'%s' is locked to node %s due to shutdown",
                        parent->id, checks->lock_node);
     }
 
     if (printed) {
         out->end_list(out);
         rc = pcmk_rc_ok;
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
 static int
 resource_check_list_xml(pcmk__output_t *out, va_list args) {
     resource_checks_t *checks = va_arg(args, resource_checks_t *);
 
     pe_resource_t *parent = uber_parent(checks->rsc);
     int rc = pcmk_rc_no_output;
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "check",
                                                    "id", parent->id,
                                                    NULL);
 
     if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
         crm_xml_add(node, "remain_stopped", "true");
     }
 
     if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
         crm_xml_add(node, "promotable", "false");
     }
 
     if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
         crm_xml_add(node, "unmanaged", "true");
     }
 
     if (checks->lock_node) {
         crm_xml_add(node, "locked-to", checks->lock_node);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
 static int
 resource_search_list_default(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     bool printed = false;
     int rc = pcmk_rc_no_output;
 
     if (!out->is_quiet(out) && nodes == NULL) {
         out->err(out, "resource %s is NOT running", requested_name);
         return rc;
     }
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
         pe_node_t *node = (pe_node_t *) lpc->data;
 
         if (!printed) {
             out->begin_list(out, NULL, NULL, "Nodes");
             printed = true;
             rc = pcmk_rc_ok;
         }
 
         if (out->is_quiet(out)) {
             out->list_item(out, "node", "%s", node->details->uname);
         } else {
             const char *state = "";
 
             if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
                 state = " Master";
             }
             out->list_item(out, "node", "resource %s is running on: %s%s",
                            requested_name, node->details->uname, state);
         }
     }
 
     if (printed) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
 static int
 resource_search_list_xml(pcmk__output_t *out, va_list args)
 {
     GList *nodes = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     pcmk__output_xml_create_parent(out, "nodes",
                                    "resource", requested_name,
                                    NULL);
 
     for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
         pe_node_t *node = (pe_node_t *) lpc->data;
         xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname);
 
         if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
             crm_xml_add(sub_node, "state", "promoted");
         }
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *",
                   "pe_node_t *")
 static int
 resource_reasons_list_default(pcmk__output_t *out, va_list args)
 {
     cib_t *cib_conn = va_arg(args, cib_t *);
     GList *resources = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
 
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
     out->begin_list(out, NULL, NULL, "Resource Reasons");
 
     if ((rsc == NULL) && (host_uname == NULL)) {
         GList *lpc = NULL;
         GList *hosts = NULL;
 
         for (lpc = resources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             rsc->fns->location(rsc, &hosts, TRUE);
 
             if (hosts == NULL) {
                 out->list_item(out, "reason", "Resource %s is not running", rsc->id);
             } else {
                 out->list_item(out, "reason", "Resource %s is running", rsc->id);
             }
 
             cli_resource_check(out, cib_conn, rsc);
             g_list_free(hosts);
             hosts = NULL;
         }
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         if (resource_is_running_on(rsc, host_uname)) {
             out->list_item(out, "reason", "Resource %s is running on host %s",
                            rsc->id, host_uname);
         } else {
             out->list_item(out, "reason", "Resource %s is not running on host %s",
                            rsc->id, host_uname);
         }
 
         cli_resource_check(out, cib_conn, rsc);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         const char *host_uname = node->details->uname;
         GList *allResources = node->details->allocated_rsc;
         GList *activeResources = node->details->running_rsc;
         GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
         GList *lpc = NULL;
 
         for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             out->list_item(out, "reason", "Resource %s is running on host %s",
                            rsc->id, host_uname);
             cli_resource_check(out, cib_conn, rsc);
         }
 
         for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             out->list_item(out, "reason", "Resource %s is assigned to host %s but not running",
                            rsc->id, host_uname);
             cli_resource_check(out, cib_conn, rsc);
         }
 
         g_list_free(allResources);
         g_list_free(activeResources);
         g_list_free(unactiveResources);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
         GList *hosts = NULL;
 
         rsc->fns->location(rsc, &hosts, TRUE);
         out->list_item(out, "reason", "Resource %s is %srunning",
                        rsc->id, (hosts? "" : "not "));
         cli_resource_check(out, cib_conn, rsc);
         g_list_free(hosts);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *",
                   "pe_node_t *")
 static int
 resource_reasons_list_xml(pcmk__output_t *out, va_list args)
 {
     cib_t *cib_conn = va_arg(args, cib_t *);
     GList *resources = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
 
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
     xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, "reason", NULL);
 
     if ((rsc == NULL) && (host_uname == NULL)) {
         GList *lpc = NULL;
         GList *hosts = NULL;
 
         pcmk__output_xml_create_parent(out, "resources", NULL);
 
         for (lpc = resources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             rsc->fns->location(rsc, &hosts, TRUE);
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", pcmk__btoa(hosts != NULL),
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
             g_list_free(hosts);
             hosts = NULL;
         }
 
         pcmk__output_xml_pop_parent(out);
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         if (resource_is_running_on(rsc, host_uname)) {
             crm_xml_add(xml_node, "running_on", host_uname);
         }
 
         cli_resource_check(out, cib_conn, rsc);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         const char *host_uname = node->details->uname;
         GList *allResources = node->details->allocated_rsc;
         GList *activeResources = node->details->running_rsc;
         GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
         GList *lpc = NULL;
 
         pcmk__output_xml_create_parent(out, "resources", NULL);
 
         for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", "true",
                                            "host", host_uname,
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
         }
 
         for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", "false",
                                            "host", host_uname,
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
         }
 
         pcmk__output_xml_pop_parent(out);
         g_list_free(allResources);
         g_list_free(activeResources);
         g_list_free(unactiveResources);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
         GList *hosts = NULL;
 
         rsc->fns->location(rsc, &hosts, TRUE);
         crm_xml_add(xml_node, "running", pcmk__btoa(hosts != NULL));
         cli_resource_check(out, cib_conn, rsc);
         g_list_free(hosts);
     }
 
     return pcmk_rc_ok;
 }
 
 static void
 add_resource_name(pcmk__output_t *out, pe_resource_t *rsc) {
     if (rsc->children == NULL) {
         out->list_item(out, "resource", "%s", rsc->id);
     } else {
         for (GListPtr lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *child = (pe_resource_t *) lpc->data;
             add_resource_name(out, child);
         }
     }
 }
 
 PCMK__OUTPUT_ARGS("resource-names-list", "GList *")
 static int
 resource_names(pcmk__output_t *out, va_list args) {
     GList *resources = va_arg(args, GList *);
 
     if (resources == NULL) {
         out->err(out, "NO resources configured\n");
         return pcmk_rc_no_output;
     }
 
     out->begin_list(out, NULL, NULL, "Resource Names");
 
     for (GList *lpc = resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *rsc = (pe_resource_t *) lpc->data;
         add_resource_name(out, rsc);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "attribute-list", "default", attribute_list_default },
     { "attribute-list", "text", attribute_list_text },
     { "property-list", "default", property_list_default },
     { "property-list", "text", property_list_text },
     { "resource-check-list", "default", resource_check_list_default },
     { "resource-check-list", "xml", resource_check_list_xml },
     { "resource-search-list", "default", resource_search_list_default },
     { "resource-search-list", "xml", resource_search_list_xml },
     { "resource-reasons-list", "default", resource_reasons_list_default },
     { "resource-reasons-list", "xml", resource_reasons_list_xml },
     { "resource-names-list", "default", resource_names },
 
     { NULL, NULL, NULL }
 };
 
 void
 crm_resource_register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index e0804fc4b6..9ff9e96dcd 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,1955 +1,1945 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/xml_internal.h>
 
 resource_checks_t *
 cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed)
 {
     pe_resource_t *parent = uber_parent(rsc);
     resource_checks_t *rc = calloc(1, sizeof(resource_checks_t));
 
     if (role_s) {
         enum rsc_role_e role = text2role(role_s);
 
         if (role == RSC_ROLE_STOPPED) {
             rc->flags |= rsc_remain_stopped;
         } else if (pcmk_is_set(parent->flags, pe_rsc_promotable) &&
                    role == RSC_ROLE_SLAVE) {
             rc->flags |= rsc_unpromotable;
         }
     }
 
     if (managed && !crm_is_true(managed)) {
         rc->flags |= rsc_unmanaged;
     }
 
     if (rsc->lock_node) {
         rc->lock_node = rsc->lock_node->details->uname;
     }
 
     rc->rsc = rsc;
     return rc;
 }
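 
 /* A minimal usage sketch (hypothetical helper, not part of the original
  * code): run the above checks for a resource and emit any findings via the
  * "resource-check-list" message, mirroring cli_resource_check() below.
  */
 static void
 example_show_checks(pcmk__output_t *out, pe_resource_t *rsc)
 {
     /* NULL role/managed values mean "not set in the CIB" */
     resource_checks_t *checks = cli_check_resource(rsc, NULL, NULL);
 
     if (checks->flags != 0 || checks->lock_node != NULL) {
         out->message(out, "resource-check-list", checks);
     }
     free(checks);
 }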
 
 GListPtr
 cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
     GListPtr found = NULL;
     pe_resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
         for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
             GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
             if (extra != NULL) {
                 found = g_list_concat(found, extra);
             }
         }
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
                && !pcmk_is_set(rsc->flags, pe_rsc_unique)
                && rsc->clone_name
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
         for (GListPtr iter = parent->children; iter; iter = iter->next) {
             GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
             if (extra != NULL) {
                 found = g_list_concat(found, extra);
             }
         }
 
     } else if (rsc->running_on != NULL) {
         found = g_list_concat(found, rsc->running_on);
     }
 
     return found;
 }
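 
 /* A minimal usage sketch (hypothetical helper, not part of the original
  * code): print each node the resource is active on. The returned list can
  * share cells with rsc->running_on (g_list_concat() above splices the
  * existing lists), so it is deliberately not freed here.
  */
 static void
 example_print_active_nodes(pcmk__output_t *out, pe_resource_t *rsc,
                            const char *requested_name,
                            pe_working_set_t *data_set)
 {
     GListPtr nodes = cli_resource_search(out, rsc, requested_name, data_set);
 
     for (GListPtr iter = nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         out->info(out, "resource %s is running on: %s",
                   rsc->id, node->details->uname);
     }
 }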
 
 #define XPATH_MAX 1024
 
 // \return Standard Pacemaker return code
 static int
 find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
                    const char *rsc, const char *attr_set_type, const char *set_name,
                    const char *attr_id, const char *attr_name, char **value)
 {
     int offset = 0;
     int rc = pcmk_rc_ok;
     xmlNode *xml_search = NULL;
     char *xpath_string = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return ENOTCONN;
     }
 
     xpath_string = calloc(1, XPATH_MAX);
     offset +=
         snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
 
     if (attr_set_type) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", attr_set_type);
         if (set_name) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
         }
     }
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
     if (attr_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
     }
 
     if (attr_name) {
         if (attr_id) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
         }
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
     }
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
     CRM_LOG_ASSERT(offset > 0);
 
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_has_children(xml_search)) {
         xmlNode *child = NULL;
 
         rc = EINVAL;
         out->info(out, "Multiple attributes match name=%s", attr_name);
 
         for (child = pcmk__xml_first_child(xml_search); child != NULL;
              child = pcmk__xml_next(child)) {
             out->info(out, "  Value: %s \t(id=%s)",
                       crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
         out->spacer(out);
 
     } else if(value) {
         const char *tmp = crm_element_value(xml_search, attr);
 
         if (tmp) {
             *value = strdup(tmp);
         }
     }
 
   bail:
     free(xpath_string);
     free_xml(xml_search);
     return rc;
 }
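 
 /* For illustration (example values, not taken from the code above): querying
  * the target-role meta-attribute of resource "myrsc", with
  * attr_set_type="meta_attributes" and no set name or attribute ID, builds an
  * XPath along the lines of:
  *
  *   /cib/configuration/resources//*[@id="myrsc"]/meta_attributes//nvpair[@name="target-role"]
  */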
 
 /* PRIVATE. Use find_matching_attr_resources() instead. */
 static void
 find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result,
                                        pe_resource_t * rsc, const char * rsc_id,
                                        const char * attr_set, const char * attr_set_type,
                                        const char * attr_id, const char * attr_name,
                                        cib_t * cib, const char * cmd, int depth)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = clone_strip(rsc->id);
     char *local_attr_id = NULL;
 
     /* visit the children */
     for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
         find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
                                                rsc_id, attr_set, attr_set_type,
                                                attr_id, attr_name, cib, cmd, depth+1);
         /* For clones, all children are equivalent, so visit only the first */
         if(pe_clone == rsc->variant) {
             break;
         }
     }
 
     rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                             attr_set, attr_id, attr_name, &local_attr_id);
     /* Post-order traversal: the root is always in the list,
      * and it is the last item. */
     if((0 == depth) || (pcmk_rc_ok == rc)) {
         /* push the head */
         *result = g_list_append(*result, rsc);
     }
 
     free(local_attr_id);
     free(lookup_id);
 }
 
 
 /* The result is a linearized post-order tree of resources. */
 static GList/*<pe_resource_t*>*/ *
 find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
                              const char * rsc_id, const char * attr_set,
                              const char * attr_set_type, const char * attr_id,
                              const char * attr_name, cib_t * cib, const char * cmd,
                              gboolean force)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     GList * result = NULL;
     /* If --force is used, update only the requested resource (clone or primitive).
      * Otherwise, if the primitive has the attribute, use that.
      * Otherwise use the clone. */
     if(force == TRUE) {
         return g_list_append(result, rsc);
     }
     if(rsc->parent && pe_clone == rsc->parent->variant) {
         int rc = pcmk_rc_ok;
         char *local_attr_id = NULL;
         rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
         free(local_attr_id);
 
         if(rc != pcmk_rc_ok) {
             rsc = rsc->parent;
             if (!out->is_quiet(out)) {
                 out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
                           cmd, attr_name, rsc->id, rsc_id);
             }
         }
         return g_list_append(result, rsc);
     } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
         pe_resource_t *child = rsc->children->data;
 
         if(child->variant == pe_native) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                     attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_rc_ok) {
                 rsc = child;
                 if (!out->is_quiet(out)) {
                     out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
                               attr_name, lookup_id, cmd, rsc_id);
                 }
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
         return g_list_append(result, rsc);
     }
     /* If the resource is a group, its children inherit the attribute if it is defined. */
     find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
                                            attr_set_type, attr_id, attr_name,
                                            cib, cmd, 0);
     return result;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc,
                               const char *requested_name, const char *attr_set,
                               const char *attr_set_type, const char *attr_id,
                               const char *attr_name, const char *attr_value,
                               gboolean recursive, cib_t *cib, int cib_options,
                               pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     static bool need_init = TRUE;
 
     char *local_attr_id = NULL;
     char *local_attr_set = NULL;
 
     GList/*<pe_resource_t*>*/ *resources = NULL;
     const char *common_attr_id = attr_id;
 
     if (attr_id == NULL && force == FALSE) {
         find_resource_attr (out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
                             NULL, NULL, attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
         if (force == FALSE) {
             rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &local_attr_id);
             if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
                 out->err(out, "WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)",
                          uber_parent(rsc)->id, attr_name, local_attr_id);
                 out->err(out, "         Delete '%s' first or use the force option to override",
                          local_attr_id);
             }
             free(local_attr_id);
             if (rc == pcmk_rc_ok) {
                 return ENOTUNIQ;
             }
         }
         resources = g_list_append(resources, rsc);
 
     } else {
         resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
                                                  attr_id, attr_name, cib, "update", force);
     }
 
     /* If either attr_set or attr_id is specified, the user clearly intends
      * to modify a single resource, which will be the last item in the
      * resource list.
      */
     for(GList *gIter = (attr_set||attr_id) ? g_list_last(resources) : resources
             ; gIter; gIter = gIter->next) {
         char *lookup_id = NULL;
 
         xmlNode *xml_top = NULL;
         xmlNode *xml_obj = NULL;
         local_attr_id = NULL;
         local_attr_set = NULL;
 
         rsc = (pe_resource_t*)gIter->data;
         attr_id = common_attr_id;
 
         lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
 
         if (rc == pcmk_rc_ok) {
             crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
             attr_id = local_attr_id;
 
         } else if (rc != ENXIO) {
             free(lookup_id);
             free(local_attr_id);
             g_list_free(resources);
             return rc;
 
         } else {
             const char *tag = crm_element_name(rsc->xml);
 
             if (attr_set == NULL) {
                 local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
                                                    attr_set_type);
                 attr_set = local_attr_set;
             }
             if (attr_id == NULL) {
                 local_attr_id = crm_strdup_printf("%s-%s", attr_set, attr_name);
                 attr_id = local_attr_id;
             }
 
             xml_top = create_xml_node(NULL, tag);
             crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
             xml_obj = create_xml_node(xml_top, attr_set_type);
             crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
         }
 
         xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
         if (xml_top == NULL) {
             xml_top = xml_obj;
         }
 
         crm_log_xml_debug(xml_top, "Update");
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
             out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, local_attr_id,
                       attr_set ? " set=" : "", attr_set ? attr_set : "",
                       attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
         }
 
         free_xml(xml_top);
 
         free(lookup_id);
         free(local_attr_id);
         free(local_attr_set);
 
         if(recursive && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
             GListPtr lpc = NULL;
 
             if(need_init) {
                 xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
                 need_init = FALSE;
                 unpack_constraints(cib_constraints, data_set);
 
                 pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
             }
 
             crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
             pe__set_resource_flags(rsc, pe_rsc_allocating);
             for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
                 pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
                 pe_resource_t *peer = cons->rsc_lh;
 
                 crm_debug("Checking %s %d", cons->id, cons->score);
                 if (cons->score > 0 && !pcmk_is_set(peer->flags, pe_rsc_allocating)) {
                     /* Don't get into colocation loops */
                     crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                     cli_resource_update_attribute(out, peer, peer->id, NULL, attr_set_type,
                                                   NULL, attr_name, attr_value, recursive,
                                                   cib, cib_options, data_set, force);
                 }
             }
         }
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc,
                               const char *requested_name, const char *attr_set,
                               const char *attr_set_type, const char *attr_id,
                               const char *attr_name, cib_t *cib, int cib_options,
                               pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     GList/*<pe_resource_t*>*/ *resources = NULL;
 
     if (attr_id == NULL && force == FALSE) {
         find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
                            NULL, NULL, attr_name, NULL);
     }
 
     if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
         resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
                                                  attr_id, attr_name, cib, "delete", force);
     } else {
         resources = g_list_append(resources, rsc);
     }
 
     for(GList *gIter = resources; gIter; gIter = gIter->next) {
         char *lookup_id = NULL;
         xmlNode *xml_obj = NULL;
         char *local_attr_id = NULL;
 
         rsc = (pe_resource_t*)gIter->data;
 
         lookup_id = clone_strip(rsc->id);
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
 
         if (rc == ENXIO) {
             free(lookup_id);
             rc = pcmk_rc_ok;
             continue;
 
         } else if (rc != pcmk_rc_ok) {
             free(lookup_id);
             g_list_free(resources);
             return rc;
         }
 
         if (attr_id == NULL) {
             attr_id = local_attr_id;
         }
 
         xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
         crm_log_xml_debug(xml_obj, "Delete");
 
         CRM_ASSERT(cib);
         rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
             out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, local_attr_id,
                       attr_set ? " set=" : "", attr_set ? attr_set : "",
                       attr_name ? " name=" : "", attr_name ? attr_name : "");
         }
 
         free(lookup_id);
         free_xml(xml_obj);
         free(local_attr_id);
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_resource,
                 const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
 {
     const char *router_node = host_uname;
     const char *rsc_api_id = NULL;
     const char *rsc_long_id = NULL;
     const char *rsc_class = NULL;
     const char *rsc_provider = NULL;
     const char *rsc_type = NULL;
     bool cib_only = false;
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         out->err(out, "Resource %s not found", rsc_id);
         return ENXIO;
 
     } else if (rsc->variant != pe_native) {
         out->err(out, "We can only process primitive resources, not %s", rsc_id);
         return EINVAL;
     }
 
     rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     if ((rsc_class == NULL) || (rsc_type == NULL)) {
         out->err(out, "Resource %s does not have a class and type", rsc_id);
         return EINVAL;
     }
 
     if (host_uname == NULL) {
         out->err(out, "Please specify a node name");
         return EINVAL;
 
     } else {
         pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node == NULL) {
             out->err(out, "Node %s not found", host_uname);
             return pcmk_rc_node_unknown;
         }
 
         if (!(node->details->online)) {
             if (do_fail_resource) {
                 out->err(out, "Node %s is not online", host_uname);
                 return ENOTCONN;
             } else {
                 cib_only = true;
             }
         }
         if (!cib_only && pe__is_guest_or_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
                          host_uname);
                 return ENOTCONN;
             }
             router_node = node->details->uname;
         }
     }
 
     if (rsc->clone_name) {
         rsc_api_id = rsc->clone_name;
         rsc_long_id = rsc->id;
     } else {
         rsc_api_id = rsc->id;
     }
     if (do_fail_resource) {
         return pcmk_controld_api_fail(controld_api, host_uname, router_node,
                                       rsc_api_id, rsc_long_id,
                                       rsc_class, rsc_provider, rsc_type);
     } else {
         return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
                                          rsc_api_id, rsc_long_id, rsc_class,
                                          rsc_provider, rsc_type, cib_only);
     }
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 rsc_fail_name(pe_resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
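 
 /* For example (illustrative values): an anonymous clone instance named
  * "myclone:2" fails as "myclone", because clone_strip() drops the ":2"
  * instance suffix, while a globally unique clone instance keeps its full
  * name.
  */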
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_history(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                   const char *host_uname, const char *rsc_id,
                   pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(out, controld_api, false, host_uname, rsc_id, data_set);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     crm_trace("Processing %d mainloop inputs",
               pcmk_controld_api_replies_expected(controld_api));
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   pcmk_controld_api_replies_expected(controld_api));
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                    const char *node_name, const char *rsc_id, const char *operation,
                    const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
     if (operation) {
         interval_ms_s = crm_strdup_printf("%u",
                                           crm_parse_interval_spec(interval_spec));
     }
 
     for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
          xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                                   failed_id,
                                                                   pe_find_renamed|pe_find_anon);
 
             if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
         if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
             if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
                 continue;
             }
 
             // If an operation was specified, an unspecified interval defaults to 0 (not all intervals)
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
             if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
                 continue;
             }
         }
 
         /* Emulate a set insertion with g_hash_table_insert();
          * g_hash_table_add() is not available until GLib 2.32:
          * g_hash_table_add(rscs, (gpointer) failed_id);
          */
         g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id);
     }
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(out, controld_api, node_name, failed_id, data_set);
         if (rc != pcmk_rc_ok) {
             break;
         }
     }
     g_hash_table_destroy(rscs);
     free(interval_ms_s);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation,
                      const char *interval_spec, pe_node_t *node)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (pe__is_guest_or_remote_node(node)) {
         attr_options |= pcmk__node_attr_remote;
     }
     rc = pcmk__node_attr_request_clear(NULL, node->details->uname, rsc_name,
                                        operation, interval_spec, NULL,
                                        attr_options);
     free(rsc_name);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                     const char *host_uname, pe_resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     pe_node_t *node = NULL;
 
     if (rsc == NULL) {
         return ENXIO;
 
     } else if (rsc->children) {
         GListPtr lpc = NULL;
 
         for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *child = (pe_resource_t *) lpc->data;
 
             rc = cli_resource_delete(out, controld_api, host_uname, child, operation,
                                      interval_spec, just_failures, data_set,
                                      force);
             if (rc != pcmk_rc_ok) {
                 return rc;
             }
         }
         return pcmk_rc_ok;
 
     } else if (host_uname == NULL) {
         GListPtr lpc = NULL;
         GListPtr nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && force) {
             nodes = pcmk__copy_node_list(data_set->nodes, false);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pe_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (pe_node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(out, controld_api, node->details->uname,
                                          rsc, operation, interval_spec,
                                          just_failures, data_set, force);
             }
             if (rc != pcmk_rc_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_rc_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         out->err(out, "Unable to clean up %s because node %s not found",
                  rsc->id, host_uname);
         return ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
                  rsc->id, host_uname);
         return EOPNOTSUPP;
     }
 
     if (controld_api == NULL) {
         out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
                  rsc->id, host_uname);
         return pcmk_rc_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up %s failures on %s: %s",
                  rsc->id, host_uname, pcmk_rc_str(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
                                 interval_spec, data_set);
     } else {
         rc = clear_rsc_history(out, controld_api, host_uname, rsc->id, data_set);
     }
     if (rc != pcmk_rc_ok) {
         out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
                  rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                 const char *node_name, const char *operation,
                 const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   display_name);
         return rc;
     }
 
     if (node_name) {
         pe_node_t *node = pe_find_node(data_set->nodes, node_name);
 
         if (node == NULL) {
             out->err(out, "Unknown node: %s", node_name);
             return ENXIO;
         }
         if (pe__is_guest_or_remote_node(node)) {
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation,
                                        interval_spec, NULL, attr_options);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up all failures on %s: %s",
                  display_name, pcmk_rc_str(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(out, controld_api, node_name, NULL,
                                 operation, interval_spec, data_set);
         if (rc != pcmk_rc_ok) {
             out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
                      node_name, pcmk_strerror(rc));
             return rc;
         }
     } else {
         for (GList *iter = data_set->nodes; iter; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
 
             rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
                                     operation, interval_spec, data_set);
             if (rc != pcmk_rc_ok) {
                 out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
                          pcmk_strerror(rc));
                 return rc;
             }
         }
     }
 
     out->info(out, "Cleaned up all resources on %s", display_name);
     return rc;
 }
 
 int
 cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc)
 {
     char *role_s = NULL;
     char *managed = NULL;
     pe_resource_t *parent = uber_parent(rsc);
     int rc = pcmk_rc_no_output;
     resource_checks_t *checks = NULL;
 
     find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
 
     find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
 
     checks = cli_check_resource(rsc, role_s, managed);
 
     if (checks->flags != 0 || checks->lock_node != NULL) {
         rc = out->message(out, "resource-check-list", checks);
     }
 
     free(role_s);
     free(managed);
     free(checks);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                   const char *host_uname, const char *rsc_id,
                   pe_working_set_t *data_set)
 {
     crm_notice("Failing %s on %s", rsc_id, host_uname);
     return send_lrm_rsc_op(out, controld_api, true, host_uname, rsc_id, data_set);
 }
 
 static GHashTable *
-generate_resource_params(pe_resource_t * rsc, pe_working_set_t * data_set)
+generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
+                         pe_working_set_t *data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
+    char *key = NULL;
+    char *value = NULL;
 
-    if (!rsc) {
-        crm_err("Resource does not exist in config");
-        return NULL;
-    }
-
-    params = crm_str_table_new();
-    meta = crm_str_table_new();
     combined = crm_str_table_new();
 
-    get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
-    get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);
-
-    if (params) {
-        char *key = NULL;
-        char *value = NULL;
-
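+    /* The resource retains ownership of the table returned by
+     * pe_rsc_params(), so copy its entries into combined rather than
+     * destroying it.
+     */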
+    params = pe_rsc_params(rsc, node, data_set);
+    if (params != NULL) {
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
-        g_hash_table_destroy(params);
     }
 
-    if (meta) {
-        char *key = NULL;
-        char *value = NULL;
-
+    meta = crm_str_table_new();
+    get_meta_attributes(meta, rsc, node, data_set);
+    if (meta != NULL) {
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
 bool resource_is_running_on(pe_resource_t *rsc, const char *host)
 {
     bool found = TRUE;
     GListPtr hIter = NULL;
     GListPtr hosts = NULL;
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if(strcmp(host, node->details->uname) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         } else if(strcmp(host, node->details->id) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if(host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = FALSE;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = FALSE;
     }
 
   done:
 
     g_list_free(hosts);
     return found;
 }
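 
 /* A minimal usage sketch (hypothetical helper): a NULL host asks whether
  * the resource is active anywhere; a non-NULL host matches either a node's
  * uname or its ID.
  */
 static void
 example_report_running(pcmk__output_t *out, pe_resource_t *rsc,
                        const char *host)
 {
     if (resource_is_running_on(rsc, host)) {
         out->info(out, "%s is running", rsc->id);
     } else {
         out->info(out, "%s is not running", rsc->id);
     }
 }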
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pe_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
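 
 /* The returned list holds newly allocated resource ID strings, so callers
  * free it with g_list_free_full(list, free), as the restart code below does.
  */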
 
 static void dump_list(GList *items, const char *tag) 
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(pcmk__output_t *out, GList *items, const char *tag)
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         out->info(out, "%s%s", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] xml        XML to use as input
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns ENOKEY ("Required key not available") if that fails,
  *       but perhaps pcmk_rc_schema_validation would be better in that case.
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
  * \param[in] data_set   Data set instance to initialize
  * \param[in] cib        Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
                             cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc = pcmk_rc_ok;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not upgrade the current CIB XML");
         free_xml(cib_xml_copy);
         return rc;
     }
 
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set,
                bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc = pcmk_rc_ok;
 
     pe_reset_working_set(data_set);
     rc = update_working_set_from_cib(out, data_set, cib);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if(simulate) {
         pid = pcmk__getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             out->err(out, "Could not create shadow cib: '%s'", pid);
             rc = ENXIO;
             goto cleanup;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         pcmk__schedule_actions(data_set, data_set->input, NULL);
         run_simulation(data_set, shadow_cib, NULL, TRUE);
         rc = update_dataset(out, shadow_cib, data_set, FALSE);
 
     } else {
         cluster_status(data_set);
     }
 
   cleanup:
     /* Do not free data_set->input here, because rsc->xml must remain valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
 static int
 max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc) 
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
         pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
 
         max_delay = value? (int) crm_parse_ll(value, NULL) : -1;
         pe_free_action(stop);
     }
 
     return max_delay;
 }
 
 static int
 max_delay_in(pe_working_set_t * data_set, GList *resources) 
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
 
     return 5 + (max_delay / 1000);
 }
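 
 /* Worked example (illustrative numbers): if the longest stop timeout among
  * the listed resources is 20000 ms, max_delay_in() returns
  * 5 + (20000 / 1000) = 25 seconds.
  */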
 
 /* TRUE if any resources still need to start (list_delta is non-NULL), or if
  * the restarted resource itself is not yet active again on the target host
  */
 #define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (resource_is_running_on((r), (h)) == FALSE))
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in] out                 Output object
  * \param[in] rsc                 The resource to restart
  * \param[in] host                The host to restart the resource on (or NULL
  *                                for all)
  * \param[in] move_lifetime       If banning the resource from the host, how
  *                                long the ban should last
  * \param[in] timeout_ms          Consider failed if actions do not complete in
  *                                this time (specified in milliseconds, but a
  *                                two-second granularity is actually used; if
  *                                0, a timeout will be calculated based on the
  *                                resource timeout)
  * \param[in] cib                 Connection to the CIB manager
  * \param[in] cib_options         Group of enum cib_call_options flags to use
  *                                with CIB calls
  * \param[in] promoted_role_only  If banning, limit the ban to the promoted role
  * \param[in] force               Whether to update only the exact requested
  *                                resource's attributes, rather than a matching
  *                                parent or child
  *
  * \return Standard Pacemaker return code (exits on certain failures)
  */
 int
 cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host,
                      const char *move_lifetime, int timeout_ms, cib_t *cib,
                      int cib_options, gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = FALSE;
     char *rsc_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t *data_set = NULL;
 
     if(resource_is_running_on(rsc, host) == FALSE) {
         const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
         if(host) {
             out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
         } else {
             out->err(out, "%s is not running anywhere and so cannot be restarted", id);
         }
         return ENXIO;
     }
 
     rsc_id = strdup(rsc->id);
     if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
         stop_via_ban = TRUE;
     }
 
     /*
       grab full cib
       determine originally active resources
       disable or ban
       poll cib and watch for affected resources to get stopped
       without --timeout, calculate the stop timeout for each step and wait for that
       if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
       if everything stopped, re-enable or un-ban
       poll cib and watch for affected resources to get started
       without --timeout, calculate the start timeout for each step and wait for that
       if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
       report success
 
       Optimizations:
       - use constraints to determine ordered list of affected resources
       - Allow a --no-deps option (aka. --force-restart)
     */
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         crm_perror(LOG_ERR, "Could not allocate working set");
         rc = ENOMEM;
         goto done;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     rc = update_dataset(out, cib, data_set, FALSE);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
         goto done;
     }
 
     restart_target_active = get_active_resources(host, data_set->resources);
     current_active = get_active_resources(host, data_set->resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         out->quiet = true;
         rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib,
                               cib_options, promoted_role_only);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it only makes any difference if it's Slave).
          */
         char *lookup_id = clone_strip(rsc->id);
 
         find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         free(lookup_id);
         rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            RSC_STOPPED, FALSE, cib, cib_options,
                                            data_set, force);
     }
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         if (current_active) {
             g_list_free_full(current_active, free);
         }
         if (restart_target_active) {
             g_list_free_full(restart_target_active, free);
         }
         goto done;
     }
 
     rc = update_dataset(out, cib, data_set, TRUE);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not determine which resources would be stopped");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set->resources);
     dump_list(target_active, "Target");
 
     list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (list_delta != NULL) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(out, cib, data_set, FALSE);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were stopped");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase; print the contents of list_delta */
             out->info(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            cib_options, data_set, force);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
                                            cib_options, data_set, force);
     }
 
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         goto done;
     }
 
     if (target_active) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
     list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(out, cib, data_set, FALSE);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were started");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* Nothing started during an entire step timeout, so give up and
              * report the contents of list_delta
              */
             out->info(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_rc_ok;
     goto done;
 
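     /* On failure, still undo our changes, deliberately ignoring the cleanup
      * calls' return codes so the original error is what gets reported.
      */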
   failure:
     if (stop_via_ban) {
         cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
     } else if (orig_target_role) {
         cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
                                       FALSE, cib, cib_options, data_set, force);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, cib, cib_options,
                                       data_set, force);
     }
 
 done:
     if (list_delta) {
         g_list_free(list_delta);
     }
     if (current_active) {
         g_list_free_full(current_active, free);
     }
     if (target_active && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active) {
         g_list_free_full(restart_target_active, free);
     }
     free(rsc_id);
     pe_free_working_set(data_set);
     return rc;
 }
 
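 /*!
  * \internal
  * \brief Check whether an action is still pending
  *
  * An action is pending if it is required (not optional), runnable, not a
  * pseudo-action, and not a notification.
  *
  * \param[in] action  Action to check
  *
  * \return true if \p action is pending, false otherwise
  */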
 static inline bool action_is_pending(pe_action_t *action)
 {
     if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
         || !pcmk_is_set(action->flags, pe_action_runnable)
         || pcmk__str_eq("notify", action->task, pcmk__str_casei)) {
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Return TRUE if any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return TRUE if any actions in the list are pending, FALSE otherwise
  */
 static bool
 actions_are_pending(GListPtr actions)
 {
     GListPtr action;
 
     for (action = actions; action != NULL; action = action->next) {
         pe_action_t *a = (pe_action_t *)action->data;
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
             return TRUE;
         }
     }
     return FALSE;
 }
 
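 /*!
  * \internal
  * \brief Print any pending actions in a list, one per line
  *
  * \param[in] out      Output object
  * \param[in] actions  List of actions to search for pending ones
  */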
 static void
 print_pending_actions(pcmk__output_t *out, GListPtr actions)
 {
     GListPtr action;
 
     out->info(out, "Pending actions:");
     for (action = actions; action != NULL; action = action->next) {
         pe_action_t *a = (pe_action_t *) action->data;
 
         if (!action_is_pending(a)) {
             continue;
         }
 
         if (a->node) {
             out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, a->node->details->uname);
         } else {
             out->info(out, "\tAction %d: %s", a->id, a->uuid);
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but one-second granularity
  *                       is actually used; if 0, a default will be used)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  */
 int
 wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
 {
     pe_working_set_t *data_set = NULL;
     int rc = pcmk_rc_ok;
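     // Convert timeout to whole seconds, rounding up; 0 means use the default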
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
     bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         return ENOMEM;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
 
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
         } else {
             print_pending_actions(out, data_set->actions);
             pe_free_working_set(data_set);
             return ETIME;
         }
         /* Sleep between checks (rc is pcmk_rc_ok on every pass, including the
          * first, because a failed CIB refresh returns early)
          */
         if (rc == pcmk_rc_ok) {
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         pe_reset_working_set(data_set);
         rc = update_working_set_from_cib(out, data_set, cib);
         if (rc != pcmk_rc_ok) {
             pe_free_working_set(data_set);
             return rc;
         }
         pcmk__schedule_actions(data_set, data_set->input, NULL);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(data_set->config_hash,
                                                          "dc-version");
 
             if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
                 out->info(out, "warning: wait option may not work properly in "
                           "mixed-version cluster");
                 printed_version_warning = TRUE;
             }
         }
 
     } while (actions_are_pending(data_set->actions));
 
     pe_free_working_set(data_set);
     return rc;
 }
 
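 /*!
  * \internal
  * \brief Execute a resource agent action, given the agent's class, provider,
  *        and type explicitly rather than a configured resource
  *
  * \note The \p params table is always freed before this function returns, so
  *       callers must not reuse it.
  *
  * \return Exit status of the action
  */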
 crm_exit_t
 cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                  const char *rsc_class, const char *rsc_prov,
                                  const char *rsc_type, const char *action,
                                  GHashTable *params, GHashTable *override_hash,
                                  int timeout_ms, int resource_verbose, gboolean force)
 {
     GHashTable *params_copy = NULL;
     crm_exit_t exit_code = CRM_EX_OK;
     svc_action_t *op = NULL;
 
     if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         out->err(out, "Sorry, the %s option doesn't support %s resources yet",
                  action, rsc_class);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     /* If no timeout was provided, grab the default. */
     if (timeout_ms == 0) {
         timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
 
     /* Add CRM_meta_timeout to the environment, as some resource agents need it */
     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                         crm_strdup_printf("%d", timeout_ms));
 
     /* Add crm_feature_set to the environment, as some resource agents need it */
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
     /* resources_action_create() frees the params hash table it is passed, but
      * we may need the table for a second call to it. Make a copy here so the
      * copy gets freed and the original remains available for reuse.
      */
     params_copy = crm_str_table_dup(params);
 
     op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
                                  timeout_ms, params_copy, 0);
     if (op == NULL) {
         /* Re-run with stderr enabled so we can display a sane error message */
         crm_enable_stderr(TRUE);
         params_copy = crm_str_table_dup(params);
         op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
                                      timeout_ms, params_copy, 0);
 
         /* Callers of cli_resource_execute() expect the params hash table to
          * be freed. That function is implemented in terms of this one, so
          * free the table here too to keep the two consistent.
          */
         g_hash_table_destroy(params);
 
         /* We know op will be NULL, but this makes static analysis happy */
         services_action_free(op);
         crm_exit(CRM_EX_DATAERR);
         return exit_code; // Never reached, but helps static analysis
     }
 
     setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
     if(resource_verbose > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     /* A resource agent using the standard ocf-shellfuncs library will not print
      * messages to stderr if it doesn't have a controlling terminal (e.g. if
      * crm_resource is called via script or ssh). This forces it to do so.
      */
     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
 
     if (override_hash) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, override_hash);
         while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
             out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'",
                       rsc_name, name, value);
             g_hash_table_replace(op->params, strdup(name), strdup(value));
         }
     }
 
     if (services_action_sync(op)) {
         exit_code = op->rc;
 
         if (op->status == PCMK_LRM_OP_DONE) {
             out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)",
                       action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
                       services_ocf_exitcode_str(op->rc), op->rc);
         } else {
             out->info(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)",
                       action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
                       services_lrm_status_str(op->status), op->status);
         }
 
         /* Hide output for validate-all unless running verbosely */
         if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) {
             goto done;
         }
 
         if (op->stdout_data || op->stderr_data) {
             out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data);
         }
     } else {
         exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc;
     }
 
 done:
     services_action_free(op);
     /* See comment above about why we free params here. */
     g_hash_table_destroy(params);
     return exit_code;
 }
 
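 /*!
  * \internal
  * \brief Map a high-level command (such as force-start) to an agent action
  *        and execute it for a configured resource
  *
  * \return Exit status of the action
  */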
 crm_exit_t
 cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
                      const char *requested_name, const char *rsc_action,
                      GHashTable *override_hash, int timeout_ms,
                      cib_t * cib, pe_working_set_t *data_set,
                      int resource_verbose, gboolean force)
 {
     crm_exit_t exit_code = CRM_EX_OK;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     const char *action = NULL;
     GHashTable *params = NULL;
 
     if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
         action = "validate-all";
 
     } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
         action = "monitor";
 
     } else if (pcmk__str_eq(rsc_action, "force-stop", pcmk__str_casei)) {
         action = rsc_action+6;
 
     } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                                     "force-promote", NULL)) {
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
             GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set);
             if(rscs != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          action, rsc->id);
                 out->err(out, "Try setting target-role=Stopped first or specifying "
                          "the force option");
                 return CRM_EX_UNSAFE;
             }
         }
 
     } else {
         action = rsc_action;
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if(rsc->variant == pe_group) {
         out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
-    params = generate_resource_params(rsc, data_set);
+    params = generate_resource_params(rsc, NULL /* @TODO use local node */,
+                                      data_set);
 
     if (timeout_ms == 0) {
         timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
     }
 
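     /* For an anonymous clone instance, pass along the name the user
      * requested rather than the clone instance's ID
      */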
     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
 
     exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action,
                                                  params, override_hash, timeout_ms,
                                                  resource_verbose, force);
     return exit_code;
 }
 
 /*!
  * \internal
  * \brief Move a resource to another node via location constraints
  *
  * \param[in] out                 Output object
  * \param[in] rsc                 Resource to move
  * \param[in] rsc_id              ID of \p rsc
  * \param[in] host_name           Name of node to move \p rsc to
  * \param[in] move_lifetime       How long the move constraint should remain
  * \param[in] cib                 Connection to the CIB manager
  * \param[in] cib_options         Group of enum cib_call_options flags
  * \param[in] data_set            Cluster working set
  * \param[in] promoted_role_only  Whether to move only the promoted instance
  * \param[in] force               Whether to proceed even if \p rsc is already
  *                                active on \p host_name
  *
  * \return Standard Pacemaker return code
  */
 int
 cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id,
                   const char *host_name, const char *move_lifetime, cib_t *cib,
                   int cib_options, pe_working_set_t *data_set,
                   gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     unsigned int count = 0;
     pe_node_t *current = NULL;
     pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = FALSE;
 
     if (dest == NULL) {
         return pcmk_rc_node_unknown;
     }
 
     if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pe_resource_t *p = uber_parent(rsc);
 
         if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
             out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             out->info(out, "Ignoring master option: %s is not promotable", rsc_id);
             promoted_role_only = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
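     /* For a promotable clone, find the promoted instance (if any) and treat
      * its node as the current location
      */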
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         GListPtr iter = NULL;
         unsigned int master_count = 0;
         pe_node_t *master_node = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
                 master_node = pe__current_node(child);
                 master_count++;
             }
         }
         if (promoted_role_only || master_count) {
             count = master_count;
             current = master_node;
         }
 
     }
 
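     /* If the resource is active in more than one place, there is no single
      * current node: for clones, just skip the later ban; for anything else,
      * treat it as an error
      */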
     if (count > 1) {
         if (pe_rsc_is_clone(rsc)) {
             current = NULL;
         } else {
             return pcmk_rc_multiple;
         }
     }
 
     if (current && (current->details == dest->details)) {
         cur_is_dest = TRUE;
         if (force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, promoted_role_only?"promoted":"active", dest->details->uname);
         } else {
             return pcmk_rc_already;
         }
     }
 
     /* Clear any previous prefer constraints across all nodes. */
     cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, FALSE, force);
 
     /* Clear any previous ban constraints on 'dest'. */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
                        cib_options, TRUE, force);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                              cib, cib_options, promoted_role_only);
 
     crm_trace("%s%s now prefers node %s%s",
               rsc->id, promoted_role_only?" (master)":"", dest->details->uname, force?"(forced)":"");
 
     /* Only ban the previous location if the current location differs from the
      * destination. It is possible to use -M to enforce a location without
      * regard to where the resource is currently located.
      */
     if(force && (cur_is_dest == FALSE)) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
                                    NULL, cib, cib_options, promoted_role_only);
 
         } else if(count > 1) {
             out->info(out, "Resource '%s' is currently %s in %d locations. "
                       "One may now move to %s",
                       rsc_id, (promoted_role_only? "promoted" : "active"),
                       count, dest->details->uname);
             out->info(out, "To prevent '%s' from being %s at a specific location, "
                       "specify a node.",
                       rsc_id, (promoted_role_only? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }