diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
new file mode 100644
index 0000000000..65a9a21995
--- /dev/null
+++ b/cts/cli/regression.crm_mon.exp
@@ -0,0 +1,855 @@
+=#=#=#= Begin test: Basic text output =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 cluster02 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+=#=#=#= End test: Basic text output - OK (0) =#=#=#=
+* Passed: crm_mon        - Basic text output
+=#=#=#= Begin test: XML output =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="5"/>
+    <resources_configured number="19" disabled="1" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="2" type="member"/>
+    <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+    <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+    <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+  </nodes>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+    </clone>
+    <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster01" id="1" cached="true"/>
+    </resource>
+    <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster02" id="2" cached="true"/>
+    </resource>
+    <resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    <clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="2">
+        <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster01">
+      <attribute name="location" value="office"/>
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+    <node name="cluster02">
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster02">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+    <node name="cluster01">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
+        <operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output
+=#=#=#= Begin test: Basic text output without node section =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 cluster02 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
+* Passed: crm_mon        - Basic text output without node section
+=#=#=#= Begin test: XML output without the node section =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --exclude=nodes">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="5"/>
+    <resources_configured number="19" disabled="1" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+    </clone>
+    <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster01" id="1" cached="true"/>
+    </resource>
+    <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster02" id="2" cached="true"/>
+    </resource>
+    <resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    <clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="2">
+        <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster01">
+      <attribute name="location" value="office"/>
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+    <node name="cluster02">
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster02">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+    <node name="cluster01">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
+        <operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output without the node section
+=#=#=#= Begin test: Text output with only the node section =#=#=#=
+Node List:
+  * Online: [ cluster01 cluster02 ]
+=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
+* Passed: crm_mon        - Text output with only the node section
+=#=#=#= Begin test: Complete text output =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 cluster02 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+
+Node Attributes:
+  * Node: cluster01:
+    * location                        	: office    
+    * pingd                           	: 1000      
+  * Node: cluster02:
+    * pingd                           	: 1000      
+
+Operations:
+  * Node: cluster02:
+    * ping: migration-threshold=1000000:
+      * (11) start
+      * (12) monitor: interval="10000ms"
+    * dummy: migration-threshold=1000000:
+      * (18) start
+      * (19) monitor: interval="60000ms"
+  * Node: cluster01:
+    * ping: migration-threshold=1000000:
+      * (17) start
+      * (18) monitor: interval="10000ms"
+    * Fencing: migration-threshold=1000000:
+      * (15) start
+      * (19) monitor: interval="60000ms"
+    * dummy: migration-threshold=1000000:
+      * (16) stop
+=#=#=#= End test: Complete text output - OK (0) =#=#=#=
+* Passed: crm_mon        - Complete text output
+=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster01 ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+
+Node Attributes:
+  * Node: cluster01:
+    * location                        	: office    
+    * pingd                           	: 1000      
+
+Operations:
+  * Node: cluster01:
+    * ping: migration-threshold=1000000:
+      * (17) start
+      * (18) monitor: interval="10000ms"
+    * Fencing: migration-threshold=1000000:
+      * (15) start
+      * (19) monitor: interval="60000ms"
+    * dummy: migration-threshold=1000000:
+      * (16) stop
+=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
+* Passed: crm_mon        - Complete text output filtered by node
+=#=#=#= Begin test: XML output filtered by node =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as xml --include=all --node=cluster01">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="5"/>
+    <resources_configured number="19" disabled="1" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+  </nodes>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+    </clone>
+    <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster01" id="1" cached="true"/>
+    </resource>
+    <resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    <clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="2">
+        <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster01">
+      <attribute name="location" value="office"/>
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster01">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
+        <operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output filtered by node
+=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster02 ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster02 ]
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+
+Node Attributes:
+  * Node: cluster02:
+    * pingd                           	: 1000      
+
+Operations:
+  * Node: cluster02:
+    * ping: migration-threshold=1000000:
+      * (11) start
+      * (12) monitor: interval="10000ms"
+    * dummy: migration-threshold=1000000:
+      * (18) start
+      * (19) monitor: interval="60000ms"
+=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
+* Passed: crm_mon        - Complete text output filtered by tag
+=#=#=#= Begin test: XML output filtered by tag =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --include=all --node=even-nodes">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="5"/>
+    <resources_configured number="19" disabled="1" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="2" type="member"/>
+  </nodes>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster02" id="2" cached="true"/>
+      </resource>
+    </clone>
+    <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster02" id="2" cached="true"/>
+    </resource>
+    <resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    <clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="2">
+        <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster02">
+      <attribute name="pingd" value="1000" expected="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster02">
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="dummy" orphan="false" migration-threshold="1000000">
+        <operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output filtered by tag
+=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Active Resources:
+  * No active resources
+=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
+* Passed: crm_mon        - Basic text output filtered by node that doesn't exist
+=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --node=blah">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="5"/>
+    <resources_configured number="19" disabled="1" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes/>
+  <resources>
+    <resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    <clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      <resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+      <replica id="2">
+        <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+      </replica>
+    </bundle>
+  </resources>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output filtered by node that doesn't exist
+=#=#=#= Begin test: Detailed text output =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (2) (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * ping	(ocf::pacemaker:ping):	 Started cluster02
+    * ping	(ocf::pacemaker:ping):	 Started cluster01
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+=#=#=#= End test: Detailed text output - OK (0) =#=#=#=
+* Passed: crm_mon        - Detailed text output
+=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 cluster02 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+  * inactive-dummy	(ocf::pacemaker:Dummy):	 Stopped (disabled)
+  * Clone Set: inactive-clone-master [inactive-clone] (promotable):
+    * Stopped: [ cluster01 cluster02 ]
+  * Container bundle set: httpd-bundle [pcmk:http]:
+    * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped
+    * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
+    * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped
+=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
+* Passed: crm_mon        - Basic text output with inactive resources
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 5 nodes configured
+  * 19 resource instances configured (1 DISABLED)
+
+Node List:
+  * Online: [ cluster02 ]
+
+Full List of Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster02 ]
+  * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
+  * inactive-dummy	(ocf::pacemaker:Dummy):	 Stopped (disabled)
+  * Clone Set: inactive-clone-master [inactive-clone] (promotable):
+    * Stopped: [ cluster02 ]
+  * Container bundle set: httpd-bundle [pcmk:http]:
+    * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped
+    * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
+    * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
+* Passed: crm_mon        - Basic text output with inactive resources, filtered by node
+=#=#=#= Begin test: Text output of partially active resources =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 4 nodes configured
+  * 11 resource instances configured
+
+Node List:
+  * Online: [ cluster01 cluster02 ]
+  * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Active Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * Container bundle set: httpd-bundle [pcmk:http]:
+    * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started
+    * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
+=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
+* Passed: crm_mon        - Text output of partially active resources
+=#=#=#= Begin test: XML output of partially active resources =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="4"/>
+    <resources_configured number="11" disabled="0" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
+    <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="3" type="member"/>
+    <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+    <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+  </nodes>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster01" id="1" cached="true"/>
+    </resource>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="0">
+        <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster02" id="2" cached="true"/>
+        </resource>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
+        </resource>
+        <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster02" id="2" cached="true"/>
+        </resource>
+        <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster02" id="2" cached="true"/>
+        </resource>
+      </replica>
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster01">
+      <attribute name="pingd" value="1000"/>
+    </node>
+    <node name="cluster02">
+      <attribute name="pingd" value="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster02">
+      <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+    <node name="cluster01">
+      <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
+        <operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+    <node name="httpd-bundle-0">
+      <resource_history id="httpd" orphan="false" migration-threshold="1000000">
+        <operation_history call="1" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output of partially active resources
+=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 4 nodes configured
+  * 11 resource instances configured
+
+Node List:
+  * Online: [ cluster01 cluster02 ]
+  * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Full List of Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 ]
+    * Stopped: [ cluster02 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * Container bundle set: httpd-bundle [pcmk:http]:
+    * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started
+    * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
+=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
+* Passed: crm_mon        - Text output of partially active resources, with inactive resources
+=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
+Cluster Summary:
+  * Stack: corosync
+  * Current DC: cluster02 (version) - partition with quorum
+  * Last updated:
+  * Last change:
+  * 4 nodes configured
+  * 11 resource instances configured
+
+Node List:
+  * Online: [ cluster01 ]
+
+Full List of Resources:
+  * Clone Set: ping-clone [ping]:
+    * Started: [ cluster01 ]
+  * Fencing	(stonith:fence_xvm):	 Started cluster01
+  * Container bundle set: httpd-bundle [pcmk:http]:
+    * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
+=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
+* Passed: crm_mon        - Text output of partially active resources, with inactive resources, filtered by node
+=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml --node=cluster01">
+  <summary>
+    <stack type="corosync"/>
+    <current_dc present="true" version="" with_quorum="true"/>
+    <last_update time=""/>
+    <last_change time=""/>
+    <nodes_configured number="4"/>
+    <resources_configured number="11" disabled="0" blocked="0"/>
+    <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+  </summary>
+  <nodes>
+    <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
+  </nodes>
+  <resources>
+    <clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+        <node name="cluster01" id="1" cached="true"/>
+      </resource>
+      <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+    </clone>
+    <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <node name="cluster01" id="1" cached="true"/>
+    </resource>
+    <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
+      <replica id="1">
+        <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+        <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+        <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+        <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+          <node name="cluster01" id="1" cached="true"/>
+        </resource>
+      </replica>
+    </bundle>
+  </resources>
+  <node_attributes>
+    <node name="cluster01">
+      <attribute name="pingd" value="1000"/>
+    </node>
+  </node_attributes>
+  <node_history>
+    <node name="cluster01">
+      <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
+        <operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="ping" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+      <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
+        <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+        <operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+      </resource_history>
+    </node>
+  </node_history>
+  <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
+* Passed: crm_mon        - XML output of partially active resources, filtered by node
diff --git a/cts/crm_mon-partial.xml b/cts/crm_mon-partial.xml
new file mode 100644
index 0000000000..bc60e3a22a
--- /dev/null
+++ b/cts/crm_mon-partial.xml
@@ -0,0 +1,144 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="37" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+        <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="cluster01"/>
+      <node id="2" uname="cluster02"/>
+    </nodes>
+    <resources>
+      <clone id="ping-clone">
+        <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
+          <instance_attributes id="ping-instance_attributes">
+            <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
+            <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
+            <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
+          </instance_attributes>
+          <operations>
+            <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
+            <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+            <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+          </operations>
+        </primitive>
+      </clone>
+      <primitive class="stonith" id="Fencing" type="fence_xvm">
+        <instance_attributes id="Fencing-instance_attributes">
+          <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+        </instance_attributes>
+        <operations>
+          <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <bundle id="httpd-bundle">
+        <docker image="pcmk:http" replicas="2"/>
+        <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
+          <port-mapping id="httpd-port" port="80"/>
+        </network>
+        <storage>
+          <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
+          <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
+          <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
+        </storage>
+        <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
+        <meta_attributes id="bundle-meta_attributes">
+          <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
+        </meta_attributes>
+      </bundle>
+    </resources>
+    <constraints/>
+  </configuration>
+  <status>
+    <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1588951263" last-run="1588951263" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="617f27ac5fff521f401e6707063e2b5e"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
+            <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
+            <lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="775c93499f09f739ccbabe79d043f5ef"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
+            <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
+            <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
+            <lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
+            <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="2">
+        <instance_attributes id="status-2">
+          <nvpair id="status-2-pingd" name="pingd" value="1000"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
+            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="a88218bb6c7dc47e6586fc75fc2a8d69"/>
+          </lrm_resource>
+          <lrm_resource id="ping" class="ocf" provider="pacemaker" type="ping">
+            <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
+            <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
+            <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
+            <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="33ef2404fd1954b12433f676cffd08ec"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
+            <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
+            <lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="a2605826ef42e23316e4d27d9cb28f8e"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
+            <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
+          </lrm_resource>
+          <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
+            <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
+            <lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1590608589" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="1">
+        <instance_attributes id="status-1">
+          <nvpair id="status-1-pingd" name="pingd" value="1000"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state id="httpd-bundle-0" uname="httpd-bundle-0">
+      <lrm id="httpd-bundle-0">
+        <lrm_resources>
+          <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
+            <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" last-rc-change="1590608589" last-run="1590608589" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>
diff --git a/cts/crm_mon.xml b/cts/crm_mon.xml
new file mode 100644
index 0000000000..007477c33a
--- /dev/null
+++ b/cts/crm_mon.xml
@@ -0,0 +1,151 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+        <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="cluster01">
+        <instance_attributes id="nodes-1">
+          <nvpair id="nodes-1-location" name="location" value="office"/>
+        </instance_attributes>
+      </node>
+      <node id="2" uname="cluster02"/>
+    </nodes>
+    <resources>
+      <clone id="ping-clone">
+        <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
+          <instance_attributes id="ping-instance_attributes">
+            <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
+            <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
+            <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
+          </instance_attributes>
+          <operations>
+            <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
+            <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+            <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+          </operations>
+        </primitive>
+      </clone>
+      <primitive class="stonith" id="Fencing" type="fence_xvm">
+        <instance_attributes id="Fencing-instance_attributes">
+          <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+        </instance_attributes>
+        <operations>
+          <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
+        <instance_attributes id="dummy-instance_attributes">
+          <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
+        </instance_attributes>
+        <operations>
+          <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+          <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+          <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
+          <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+          <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+          <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="inactive-dummy" provider="pacemaker" type="Dummy">
+        <meta_attributes id="inactive-dummy.meta">
+          <nvpair id="inactive-dummy.meta.target" name="target-role" value="stopped"/>
+        </meta_attributes>
+      </primitive>
+      <master id="inactive-clone-master">
+        <primitive class="ocf" id="inactive-clone" provider="pacemaker" type="Stateful">
+          <operations>
+            <op id="inactive-clone-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
+            <op id="inactive-clone-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
+          </operations>
+        </primitive>
+      </master>
+      <bundle id="httpd-bundle">
+        <docker image="pcmk:http" replicas="3"/>
+        <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
+          <port-mapping id="httpd-port" port="80"/>
+        </network>
+        <storage>
+          <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
+          <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
+          <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
+        </storage>
+        <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
+        <meta_attributes id="bundle-meta_attributes">
+          <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
+        </meta_attributes>
+      </bundle>
+    </resources>
+    <constraints/>
+    <tags>
+      <tag id="all-nodes">
+        <obj_ref id="1"/>
+        <obj_ref id="2"/>
+      </tag>
+      <tag id="even-nodes">
+        <obj_ref id="2"/>
+      </tag>
+      <tag id="odd-nodes">
+        <obj_ref id="1"/>
+      </tag>
+    </tags>
+    <op_defaults>
+      <meta_attributes id="op_defaults-options">
+        <nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
+      </meta_attributes>
+    </op_defaults>
+  </configuration>
+  <status>
+    <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1588951263" last-run="1588951263" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
+            <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951265" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1588951263" last-run="1588951263" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
+          </lrm_resource>
+          <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1588951278" last-run="1588951278" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
+            <lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951284" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="2">
+        <instance_attributes id="status-2">
+          <nvpair id="status-2-pingd" name="pingd" value="1000"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
+            <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951274" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
+            <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951276" exec-time="24" queue-time="0" op-digest="f85d77708ad4ea02a9099e1e548aff0d"/>
+          </lrm_resource>
+          <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="1">
+        <instance_attributes id="status-1">
+          <nvpair id="status-1-pingd" name="pingd" value="1000"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+  </status>
+</cib>
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index b6d873b42d..12f3311dd9 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,1223 +1,1339 @@
 #!@BASH_PATH@
 #
 # Copyright 2008-2020 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 #
 # Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
 # compatible functionality. Do not use the -i option, alternation (\|),
 # \0, or character sequences such as \n or \s.
 #
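+# For illustration only (nothing below relies on this): a portable
+# replacement for a GNU-only in-place edit writes to a temporary file:
+#
+#     sed -e 's/old/new/' file > file.tmp && mv file.tmp file
+#
+# rather than "sed -i 's/old/new/' file".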
 
 USAGE_TEXT="Usage: cts-cli [<options>]
 Options:
  --help          Display this text, then exit
  -V, --verbose   Display any differences from expected output
- -t 'TEST [...]' Run only specified tests (default: 'dates tools acls validity upgrade rules')
+ -t 'TEST [...]' Run only specified tests (default: 'dates tools crm_mon acls validity upgrade rules')
  -p DIR          Look for executables in DIR (may be specified multiple times)
  -v, --valgrind  Run all commands under valgrind
  -s              Save actual output as expected output"
 
 # If readlink supports -e (i.e. GNU), use it
 readlink -e / >/dev/null 2>/dev/null
 if [ $? -eq 0 ]; then
     test_home="$(dirname "$(readlink -e "$0")")"
 else
     test_home="$(dirname "$0")"
 fi
 
 : ${shadow=cts-cli}
 shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
 num_errors=0
 num_passed=0
 verbose=0
-tests="dates tools acls validity upgrade rules"
+tests="dates tools crm_mon acls validity upgrade rules"
 do_save=0
 VALGRIND_CMD=
 VALGRIND_OPTS="
     -q
     --gen-suppressions=all
     --show-reachable=no
     --leak-check=full
     --trace-children=no
     --time-stamp=yes
     --num-callers=20
     --suppressions=$test_home/valgrind-pcmk.suppressions
 "
 
 # These constants must track crm_exit_t values
 CRM_EX_OK=0
 CRM_EX_ERROR=1
 CRM_EX_INVALID_PARAM=2
 CRM_EX_UNIMPLEMENT_FEATURE=3
 CRM_EX_INSUFFICIENT_PRIV=4
 CRM_EX_USAGE=64
 CRM_EX_CONFIG=78
 CRM_EX_OLD=103
 CRM_EX_DIGEST=104
 CRM_EX_NOSUCH=105
 CRM_EX_UNSAFE=107
 CRM_EX_EXISTS=108
 CRM_EX_MULTIPLE=109
 CRM_EX_EXPIRED=110
 CRM_EX_NOT_YET_IN_EFFECT=111
 CRM_EX_INDETERMINATE=112
 CRM_EX_UNSATISFIED=113
 
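+# Calling convention used throughout this file: set the global variables
+# $desc (test description) and $cmd (command line), then call test_assert
+# with the expected exit code. A second argument of 0 skips the post-test
+# CIB dump; omitting it prints the current CIB after the command runs.
+# For example:
+#
+#     desc="Validate CIB"
+#     cmd="cibadmin -Q"
+#     test_assert $CRM_EX_OK 0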
 function test_assert() {
     target=$1; shift
     cib=$1; shift
     app=`echo "$cmd" | sed 's/\ .*//'`
     printf "* Running: $app - $desc\n" 1>&2
 
     printf "=#=#=#= Begin test: $desc =#=#=#=\n"
     eval $VALGRIND_CMD $cmd 2>&1
     rc=$?
 
     if [ x$cib != x0 ]; then
         printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
         CIB_user=root cibadmin -Q
     fi
 
     printf "=#=#=#= End test: $desc - $(crm_error --exit $rc) ($rc) =#=#=#=\n"
 
     if [ $rc -ne $target ]; then
         num_errors=$(( $num_errors + 1 ))
         printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
         printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
         return
     else
         printf "* Passed: %-14s - %s\n" $app "$desc"
         num_passed=$(( $num_passed + 1 ))
     fi
 }
 
+function test_crm_mon() {
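+    # With CIB_file set, the CLI tools read cluster state from the named
+    # file rather than contacting a live cluster, so these tests run
+    # deterministically without one.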
+    export CIB_file="$SRCDIR/cts/crm_mon.xml"
+
+    desc="Basic text output"
+    cmd="crm_mon -1"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output"
+    cmd="crm_mon --output-as=xml"
+    test_assert $CRM_EX_OK 0
+
+    desc="Basic text output without node section"
+    cmd="crm_mon -1 --exclude=nodes"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output without the node section"
+    cmd="crm_mon --output-as=xml --exclude=nodes"
+    test_assert $CRM_EX_OK 0
+
+    desc="Text output with only the node section"
+    cmd="crm_mon -1 --exclude=all --include=nodes"
+    test_assert $CRM_EX_OK 0
+
+    # The above test doesn't need to be repeated for other output formats;
+    # it really just checks that blank lines are printed correctly.
+
+    desc="Complete text output"
+    cmd="crm_mon -1 --include=all"
+    test_assert $CRM_EX_OK 0
+
+    # XML output already includes everything, so there's no need for a complete test
+
+    desc="Complete text output filtered by node"
+    cmd="crm_mon -1 --include=all --node=cluster01"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output filtered by node"
+    cmd="crm_mon --output-as xml --include=all --node=cluster01"
+    test_assert $CRM_EX_OK 0
+
+    desc="Complete text output filtered by tag"
+    cmd="crm_mon -1 --include=all --node=even-nodes"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output filtered by tag"
+    cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
+    test_assert $CRM_EX_OK 0
+
+    desc="Basic text output filtered by node that doesn't exist"
+    cmd="crm_mon -1 --node=blah"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output filtered by node that doesn't exist"
+    cmd="crm_mon --output-as=xml --node=blah"
+    test_assert $CRM_EX_OK 0
+
+    desc="Detailed text output"
+    cmd="crm_mon -1 --show-detail"
+    test_assert $CRM_EX_OK 0
+
+    # XML already includes extra detail
+
+    desc="Basic text output with inactive resources"
+    cmd="crm_mon -1 -r"
+    test_assert $CRM_EX_OK 0
+
+    # XML already includes inactive resources
+
+    desc="Basic text output with inactive resources, filtered by node"
+    cmd="crm_mon -1 -r --node=cluster02"
+    test_assert $CRM_EX_OK 0
+
+    # XML already includes inactive resources
+
+    unset CIB_file
+
+    export CIB_file="$SRCDIR/cts/crm_mon-partial.xml"
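+    # In this fixture the httpd bundle is only partially active: replica 0
+    # is running its httpd instance while replica 1 is not, which exercises
+    # the mixed Started/Stopped output paths below.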
+
+    desc="Text output of partially active resources"
+    cmd="crm_mon -1"
+    test_assert $CRM_EX_OK 0
+
+    desc="XML output of partially active resources"
+    cmd="crm_mon -1 --output-as=xml"
+    test_assert $CRM_EX_OK 0
+
+    desc="Text output of partially active resources, with inactive resources"
+    cmd="crm_mon -1 -r"
+    test_assert $CRM_EX_OK 0
+
+    # XML already includes inactive resources
+
+    desc="Text output of partially active resources, with inactive resources, filtered by node"
+    cmd="crm_mon -1 -r --node=cluster01"
+    test_assert $CRM_EX_OK 0
+
+    desc="Text output of partially active resources, filtered by node"
+    cmd="crm_mon -1 --output-as=xml --node=cluster01"
+    test_assert $CRM_EX_OK 0
+
+    unset CIB_file
+}
+
 function test_tools() {
     local TMPXML
     local TMPORIG
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX)
     export CIB_shadow_dir="${shadow_dir}"
 
     $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow  2>&1
     export CIB_shadow=$shadow
 
     desc="Validate CIB"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_OK
 
     desc="Configure something before erasing"
     cmd="crm_attribute -n cluster-delay -v 60s"
     test_assert $CRM_EX_OK
 
     desc="Require --force for CIB erasure"
     cmd="cibadmin -E"
     test_assert $CRM_EX_UNSAFE
 
     desc="Allow CIB erasure with --force"
     cmd="cibadmin -E --force"
     test_assert $CRM_EX_OK
 
     desc="Query CIB"
     cmd="cibadmin -Q > $TMPORIG"
     test_assert $CRM_EX_OK
 
     desc="Set cluster option"
     cmd="crm_attribute -n cluster-delay -v 60s"
     test_assert $CRM_EX_OK
 
     desc="Query new cluster option"
     cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
     test_assert $CRM_EX_OK
 
     desc="Query cluster options"
     cmd="cibadmin -Q -o crm_config > $TMPXML"
     test_assert $CRM_EX_OK
 
     desc="Set no-quorum policy"
     cmd="crm_attribute -n no-quorum-policy -v ignore"
     test_assert $CRM_EX_OK
 
     desc="Delete nvpair"
     cmd="cibadmin -D -o crm_config --xml-text '<nvpair id=\"cib-bootstrap-options-cluster-delay\"/>'"
     test_assert $CRM_EX_OK
 
     desc="Create operation should fail"
     cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
     test_assert $CRM_EX_EXISTS
 
     desc="Modify cluster options section"
     cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
     test_assert $CRM_EX_OK
 
     desc="Query updated cluster option"
     cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
     test_assert $CRM_EX_OK
 
     desc="Set duplicate cluster option"
     cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
     test_assert $CRM_EX_OK
 
     desc="Setting multiply defined cluster option should fail"
     cmd="crm_attribute -n cluster-delay -v 30s"
     test_assert $CRM_EX_MULTIPLE
 
     desc="Set cluster option with -s"
     cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
     test_assert $CRM_EX_OK
 
     desc="Delete cluster option with -i"
     cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
     test_assert $CRM_EX_OK
 
     desc="Create node1 and bring it online"
     cmd="crm_simulate --live-check --in-place --node-up=node1"
     test_assert $CRM_EX_OK
 
     desc="Create node attribute"
     cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
     test_assert $CRM_EX_OK
 
     desc="Query new node attribute"
     cmd="cibadmin -Q -o nodes | grep node1-ram"
     test_assert $CRM_EX_OK
 
     desc="Set a transient (fail-count) node attribute"
     cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
     test_assert $CRM_EX_OK
 
     desc="Query a fail count"
     cmd="crm_failcount --query -r foo -N node1"
     test_assert $CRM_EX_OK
 
     desc="Delete a transient (fail-count) node attribute"
     cmd="crm_attribute -n fail-count-foo -D -N node1 -t status"
     test_assert $CRM_EX_OK
 
     desc="Digest calculation"
     cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
     test_assert $CRM_EX_OK
 
     # This update will fail because $TMPORIG still holds the CIB's earlier
     # version numbers (epoch/num_updates), so cibadmin rejects it as outdated
     desc="Replace operation should fail"
     cmd="cibadmin -R --xml-file $TMPORIG"
     test_assert $CRM_EX_OLD
 
     desc="Default standby value"
     cmd="crm_standby -N node1 -G"
     test_assert $CRM_EX_OK
  
     desc="Set standby status"
     cmd="crm_standby -N node1 -v true"
     test_assert $CRM_EX_OK
  
     desc="Query standby value"
     cmd="crm_standby -N node1 -G"
     test_assert $CRM_EX_OK
  
     desc="Delete standby value"
     cmd="crm_standby -N node1 -D"
     test_assert $CRM_EX_OK
 
     desc="Create a resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
     test_assert $CRM_EX_OK
 
     desc="Create a resource meta attribute"
     cmd="crm_resource -r dummy --meta -p is-managed -v false"
     test_assert $CRM_EX_OK
 
     desc="Query a resource meta attribute"
     cmd="crm_resource -r dummy --meta -g is-managed"
     test_assert $CRM_EX_OK
 
     desc="Remove a resource meta attribute"
     cmd="crm_resource -r dummy --meta -d is-managed"
     test_assert $CRM_EX_OK
 
     desc="Create a resource attribute"
     cmd="crm_resource -r dummy -p delay -v 10s"
     test_assert $CRM_EX_OK
 
     desc="List the configured resources"
     cmd="crm_resource -L"
     test_assert $CRM_EX_OK
 
     desc="Require a destination when migrating a resource that is stopped"
     cmd="crm_resource -r dummy -M"
     test_assert $CRM_EX_USAGE
 
     desc="Don't support migration to non-existent locations"
     cmd="crm_resource -r dummy -M -N i.do.not.exist"
     test_assert $CRM_EX_NOSUCH
 
     desc="Create a fencing resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"Fence\" class=\"stonith\" type=\"fence_true\"/>'"
     test_assert $CRM_EX_OK
 
     desc="Bring resources online"
     cmd="crm_simulate --live-check --in-place -S"
     test_assert $CRM_EX_OK
 
     desc="Try to move a resource to its existing location"
     cmd="crm_resource -r dummy --move --host node1"
     test_assert $CRM_EX_EXISTS
 
     desc="Move a resource from its existing location"
     cmd="crm_resource -r dummy --move"
     test_assert $CRM_EX_OK
 
     desc="Clear out constraints generated by --move"
     cmd="crm_resource -r dummy --clear"
     test_assert $CRM_EX_OK
 
     desc="Default ticket granted state"
     cmd="crm_ticket -t ticketA -G granted -d false"
     test_assert $CRM_EX_OK
 
     desc="Set ticket granted state"
     cmd="crm_ticket -t ticketA -r --force"
     test_assert $CRM_EX_OK
 
     desc="Query ticket granted state"
     cmd="crm_ticket -t ticketA -G granted"
     test_assert $CRM_EX_OK
 
     desc="Delete ticket granted state"
     cmd="crm_ticket -t ticketA -D granted --force"
     test_assert $CRM_EX_OK
 
     desc="Make a ticket standby"
     cmd="crm_ticket -t ticketA -s"
     test_assert $CRM_EX_OK
 
     desc="Query ticket standby state"
     cmd="crm_ticket -t ticketA -G standby"
     test_assert $CRM_EX_OK
 
     desc="Activate a ticket"
     cmd="crm_ticket -t ticketA -a"
     test_assert $CRM_EX_OK
 
     desc="Delete ticket standby state"
     cmd="crm_ticket -t ticketA -D standby"
     test_assert $CRM_EX_OK
 
     desc="Ban a resource on unknown node"
     cmd="crm_resource -r dummy -B -N host1"
     test_assert $CRM_EX_NOSUCH
 
     desc="Create two more nodes and bring them online"
     cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
     test_assert $CRM_EX_OK
 
     desc="Ban dummy from node1"
     cmd="crm_resource -r dummy -B -N node1"
     test_assert $CRM_EX_OK
 
     desc="Ban dummy from node2"
     cmd="crm_resource -r dummy -B -N node2"
     test_assert $CRM_EX_OK
 
     desc="Relocate resources due to ban"
     cmd="crm_simulate --live-check --in-place -S"
     test_assert $CRM_EX_OK
 
     desc="Move dummy to node1"
     cmd="crm_resource -r dummy -M -N node1"
     test_assert $CRM_EX_OK
 
     desc="Clear implicit constraints for dummy on node2"
     cmd="crm_resource -r dummy -U -N node2"
     test_assert $CRM_EX_OK
 
     desc="Drop the status section"
     cmd="cibadmin -R -o status --xml-text '<status/>'"
     test_assert $CRM_EX_OK 0
     
     desc="Create a clone"
     cmd="cibadmin -C -o resources --xml-text '<clone id=\"test-clone\"><primitive id=\"test-primitive\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/></clone>'"
     test_assert $CRM_EX_OK 0
 
     desc="Create a resource meta attribute"
     cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
     test_assert $CRM_EX_OK
 
     desc="Create a resource meta attribute in the primitive"
     cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
     test_assert $CRM_EX_OK
 
     desc="Update resource meta attribute with duplicates"
     cmd="crm_resource -r test-clone --meta -p is-managed -v true"
     test_assert $CRM_EX_OK
 
     desc="Update resource meta attribute with duplicates (force clone)"
     cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
     test_assert $CRM_EX_OK
 
     desc="Update child resource meta attribute with duplicates"
     cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
     test_assert $CRM_EX_OK
 
     desc="Delete resource meta attribute with duplicates"
     cmd="crm_resource -r test-clone --meta -d is-managed"
     test_assert $CRM_EX_OK
 
     desc="Delete resource meta attribute in parent"
     cmd="crm_resource -r test-primitive --meta -d is-managed"
     test_assert $CRM_EX_OK
 
     desc="Create a resource meta attribute in the primitive"
     cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
     test_assert $CRM_EX_OK
 
     desc="Update existing resource meta attribute"
     cmd="crm_resource -r test-clone --meta -p is-managed -v true"
     test_assert $CRM_EX_OK
     
     desc="Create a resource meta attribute in the parent"
     cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
     test_assert $CRM_EX_OK
 
     desc="Copy resources"
     cmd="cibadmin -Q -o resources > $TMPXML"
     test_assert $CRM_EX_OK 0
 
     desc="Delete resource paremt meta attribute (force)"
     cmd="crm_resource -r test-clone --meta -d is-managed --force"
     test_assert $CRM_EX_OK
 
     desc="Restore duplicates"
     cmd="cibadmin -R -o resources --xml-file $TMPXML"
     test_assert $CRM_EX_OK
 
     desc="Delete resource child meta attribute"
     cmd="crm_resource -r test-primitive --meta -d is-managed"
     test_assert $CRM_EX_OK
 
     cibadmin -C -o resources --xml-text '<group id="dummy-group"> \
         <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"\/> \
         <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"\/> \
       </group>'
 
     desc="Create a resource meta attribute in dummy1"
     cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
     test_assert $CRM_EX_OK
 
     desc="Create a resource meta attribute in dummy-group"
     cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
     test_assert $CRM_EX_OK
 
     cibadmin -D -o resource --xml-text '<group id="dummy-group">'
 
     desc="Specify a lifetime when moving a resource"
     cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
     test_assert $CRM_EX_OK
 
     desc="Try to move a resource previously moved with a lifetime"
     cmd="crm_resource -r dummy --move --node node1"
     test_assert $CRM_EX_OK
 
     desc="Ban dummy from node1 for a short time"
     cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
     test_assert $CRM_EX_OK
 
     desc="Remove expired constraints"
     sleep 2
     cmd="crm_resource --clear --expired"
     test_assert $CRM_EX_OK
 
+    unset CIB_shadow
     unset CIB_shadow_dir
     rm -f "$TMPXML" "$TMPORIG"
 
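+    # Like diff(1), crm_diff exits with a nonzero status when its inputs
+    # differ, so CRM_EX_ERROR is the expected result for two different files.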
     desc="Create an XML patchset"
     cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
     test_assert $CRM_EX_ERROR 0
 }
 
 INVALID_PERIODS=(
     "2019-01-01 00:00:00Z"              # Start with no end
     "2019-01-01 00:00:00Z/"             # Start with only a trailing slash
     "PT2S/P1M"                          # Two durations
     "2019-13-01 00:00:00Z/P1M"          # Out-of-range month
     "20191077T15/P1M"                   # Out-of-range day
     "2019-10-01T25:00:00Z/P1M"          # Out-of-range hour
     "2019-10-01T24:00:01Z/P1M"          # Hour 24 with anything but :00:00
     "PT5H/20191001T007000Z"             # Out-of-range minute
     "2019-10-01 00:00:80Z/P1M"          # Out-of-range second
     "2019-10-01 00:00:10 +25:00/P1M"    # Out-of-range offset hour
     "20191001T000010 -00:61/P1M"        # Out-of-range offset minute
     "P1Y/2019-02-29 00:00:00Z"          # Feb. 29 in non-leap-year
     "2019-01-01 00:00:00Z/P"            # Duration with no values
     "P1Z/2019-02-20 00:00:00Z"          # Invalid duration unit
     "P1YM/2019-02-20 00:00:00Z"         # No number for duration unit
 )
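+# For contrast, well-formed period specifications take one of these forms
+# (illustrative examples, not exercised here):
+#     "2019-01-01 00:00:00Z/2019-02-01 00:00:00Z"   # start/end
+#     "2019-01-01 00:00:00Z/P1M"                    # start/duration
+#     "P1M/2019-02-01 00:00:00Z"                    # duration/end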
         
 function test_dates() {
     # Ensure invalid period specifications are rejected
     for spec in '' "${INVALID_PERIODS[@]}"; do
         desc="Invalid period - [$spec]"
         cmd="iso8601 -p \"$spec\""
         test_assert $CRM_EX_INVALID_PARAM 0
     done
 
     desc="2014-01-01 00:30:00 - 1 Hour"
     cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="Valid date - Feb 29 in leap year"
     cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="Valid date - using 'T' and offset"
     cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
     test_assert $CRM_EX_OK 0
 
     desc="24:00:00 equivalent to 00:00:00 of next day"
     cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
         desc="20$y-W01-7"
         cmd="iso8601 -d '20$y-W01-7 00Z'"
         test_assert $CRM_EX_OK 0
 
         desc="20$y-W01-7 - round-trip"
         cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
         test_assert $CRM_EX_OK 0
 
         desc="20$y-W01-1"
         cmd="iso8601 -d '20$y-W01-1 00Z'"
         test_assert $CRM_EX_OK 0
 
         desc="20$y-W01-1 - round-trip"
         cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
         test_assert $CRM_EX_OK 0
     done
 
     desc="2009-W53-07"
     cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="epoch + 2 Years 5 Months 6 Minutes"
     cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
     test_assert $CRM_EX_OK 0
 
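+    # The next three checks exercise month-end clamping: adding months to
+    # Jan. 31 lands on the last day of any shorter target month.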
     desc="2009-01-31 + 1 Month"
     cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="2009-01-31 + 2 Months"
     cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="2009-01-31 + 3 Months"
     cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="2009-03-31 - 1 Month"
     cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 
     desc="2038-01-01 + 3 Months"
     cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
     test_assert $CRM_EX_OK 0
 }
 
 function test_acl_loop() {
     local TMPXML
 
     TMPXML="$1"
 
     # Make sure we're rejecting things for the right reasons
     export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
     export PCMK_stderr=1
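+    # PCMK_trace_functions turns on trace logging for just those functions,
+    # and PCMK_stderr=1 sends it to stderr, so the captured output shows why
+    # each request was denied.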
 
     CIB_user=root cibadmin --replace --xml-text '<resources/>'
 
     export CIB_user=unknownguy
     desc="$CIB_user: Query configuration"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Set enable-acl"
     cmd="crm_attribute -n enable-acl -v false"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Set stonith-enabled"
     cmd="crm_attribute -n stonith-enabled -v false"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Create a resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     export CIB_user=l33t-haxor
     desc="$CIB_user: Query configuration"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Set enable-acl"
     cmd="crm_attribute -n enable-acl -v false"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Set stonith-enabled"
     cmd="crm_attribute -n stonith-enabled -v false"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Create a resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     export CIB_user=niceguy
     desc="$CIB_user: Query configuration"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_OK 0
 
     desc="$CIB_user: Set enable-acl"
     cmd="crm_attribute -n enable-acl -v false"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Set stonith-enabled"
     cmd="crm_attribute -n stonith-enabled -v false"
     test_assert $CRM_EX_OK
 
     desc="$CIB_user: Create a resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     export CIB_user=root
     desc="$CIB_user: Query configuration"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_OK 0
 
     desc="$CIB_user: Set stonith-enabled"
     cmd="crm_attribute -n stonith-enabled -v true"
     test_assert $CRM_EX_OK
 
     desc="$CIB_user: Create a resource"
     cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
     test_assert $CRM_EX_OK
 
     export CIB_user=l33t-haxor
 
     desc="$CIB_user: Create a resource meta attribute"
     cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Query a resource meta attribute"
     cmd="crm_resource -r dummy --meta -g target-role"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     desc="$CIB_user: Remove a resource meta attribute"
     cmd="crm_resource -r dummy --meta -d target-role"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     export CIB_user=niceguy
 
     desc="$CIB_user: Create a resource meta attribute"
     cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
     test_assert $CRM_EX_OK
 
     desc="$CIB_user: Query a resource meta attribute"
     cmd="crm_resource -r dummy --meta -g target-role"
     test_assert $CRM_EX_OK
 
     desc="$CIB_user: Remove a resource meta attribute"
     cmd="crm_resource -r dummy --meta -d target-role"
     test_assert $CRM_EX_OK
 
     desc="$CIB_user: Create a resource meta attribute"
     cmd="crm_resource -r dummy --meta -p target-role -v Started"
     test_assert $CRM_EX_OK
 
     export CIB_user=badidea
     desc="$CIB_user: Query configuration - implied deny"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_OK 0
 
     export CIB_user=betteridea
     desc="$CIB_user: Query configuration - explicit deny"
     cmd="cibadmin -Q"
     test_assert $CRM_EX_OK 0
 
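+    # Pattern for the --replace tests below: snapshot the live CIB to
+    # $TMPXML, mutate the snapshot offline (CIB_file= points the tools at
+    # the file rather than the cluster), then try to push the mutated file
+    # back with cibadmin --replace as the restricted user.  The ACLs decide
+    # whether the implied change is allowed.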
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text '<acls/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     export CIB_user=niceguy
     desc="$CIB_user: Replace - remove acls"
     cmd="cibadmin --replace --xml-file $TMPXML"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - create resource"
     cmd="cibadmin --replace --xml-file $TMPXML"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - modify attribute (deny)"
     cmd="cibadmin --replace --xml-file $TMPXML"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text '<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - delete attribute (deny)"
     cmd="cibadmin --replace --xml-file $TMPXML"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - create attribute (deny)"
     cmd="cibadmin --replace --xml-file $TMPXML"
     test_assert $CRM_EX_INSUFFICIENT_PRIV 0
 
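+    # CIB_user was exported earlier in this function, so this plain
+    # reassignment remains exported to the commands test_assert runs.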
     CIB_user=bob
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - create attribute (allow)"
     cmd="cibadmin --replace -o resources --xml-file $TMPXML"
     test_assert $CRM_EX_OK 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - modify attribute (allow)"
     cmd="cibadmin --replace -o resources --xml-file $TMPXML"
     test_assert $CRM_EX_OK 0
 
     CIB_user=root cibadmin -Q > "$TMPXML"
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'
     CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
 
     desc="$CIB_user: Replace - delete attribute (allow)"
     cmd="cibadmin --replace -o resources --xml-file $TMPXML"
     test_assert $CRM_EX_OK 0
 }
 
 function test_acls() {
     local SHADOWPATH
     local TMPXML
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
     export CIB_shadow_dir="${shadow_dir}"
 
     $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.3 2>&1
     export CIB_shadow=$shadow
 
     cat <<EOF > "$TMPXML"
     <acls>
       <acl_user id="l33t-haxor">
         <deny id="crook-nothing" xpath="/cib"/>
       </acl_user>
       <acl_user id="niceguy">
         <role_ref id="observer"/>
       </acl_user>
       <acl_user id="bob">
         <role_ref id="admin"/>
       </acl_user>
       <acl_role id="observer">
         <read id="observer-read-1" xpath="/cib"/>
         <write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
         <write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
       </acl_role>
       <acl_role id="admin">
         <read id="admin-read-1" xpath="/cib"/>
         <write id="admin-write-1" xpath="//resources"/>
       </acl_role>
     </acls>
 EOF
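+    # In short: l33t-haxor is denied everything under /cib; niceguy gets
+    # the observer role (read all, write only the stonith-enabled and
+    # target-role nvpairs); bob gets admin (read all, write anything under
+    # //resources).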
 
     desc="Configure some ACLs"
     cmd="cibadmin -M -o acls --xml-file $TMPXML"
     test_assert $CRM_EX_OK
 
     desc="Enable ACLs"
     cmd="crm_attribute -n enable-acl -v true"
     test_assert $CRM_EX_OK
 
     desc="Set cluster option"
     cmd="crm_attribute -n no-quorum-policy -v ignore"
     test_assert $CRM_EX_OK
 
     desc="New ACL"
     cmd="cibadmin --create -o acls --xml-text '<acl_user id=\"badidea\"><read id=\"badidea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
     test_assert $CRM_EX_OK
 
     desc="Another ACL"
     cmd="cibadmin --create -o acls --xml-text '<acl_user id=\"betteridea\"><read id=\"betteridea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
     test_assert $CRM_EX_OK
 
     desc="Updated ACL"
     cmd="cibadmin --replace -o acls --xml-text '<acl_user id=\"betteridea\"><deny id=\"betteridea-nothing\" xpath=\"/cib\"/><read id=\"betteridea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
     test_assert $CRM_EX_OK
 
     test_acl_loop "$TMPXML"
 
     printf "\n\n    !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
     printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
 
     export CIB_user=root
     desc="$CIB_user: Upgrade to latest CIB schema"
     cmd="cibadmin --upgrade --force -V"
     test_assert $CRM_EX_OK
 
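+    # The forced upgrade bumps the shadow CIB's version counters; rewrite
+    # epoch and admin_epoch below, presumably so the second test_acl_loop
+    # pass produces output comparable with the first.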
     SHADOWPATH="$(crm_shadow --file)"
     # sed -i isn't portable :-(
     cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # to keep permissions
     sed -e 's/epoch=.2/epoch=\"6/g' -e 's/admin_epoch=.1/admin_epoch=\"0/g' \
         "$SHADOWPATH" > "${SHADOWPATH}.$$"
     mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
 
     test_acl_loop "$TMPXML"
 
     unset CIB_shadow_dir
     rm -f "$TMPXML"
 }
 
 function test_validity() {
     local TMPGOOD
     local TMPBAD
 
     TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
     TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
     export CIB_shadow_dir="${shadow_dir}"
 
     $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.2 2>&1
     export CIB_shadow=$shadow
     export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update
     export PCMK_stderr=1
 
     cibadmin -C -o resources --xml-text '<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>'
     cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'
     cibadmin -C -o constraints --xml-text '<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>'
     cibadmin -Q > "$TMPGOOD"
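+    # Strategy: $TMPGOOD holds a known-valid CIB.  Each case below first
+    # tries to introduce a flaw through cibadmin (which must refuse it),
+    # then crafts a $TMPBAD copy with sed and checks how crm_simulate
+    # reacts to reading the broken file directly.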
 
 
     desc="Try to make resulting CIB invalid (enum violation)"
     cmd="cibadmin -M -o constraints --xml-text '<rsc_order id=\"ord_1-2\" first=\"dummy1\" first-action=\"break\" then=\"dummy2\"/>'"
     test_assert $CRM_EX_CONFIG
 
     sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
     desc="Run crm_simulate with invalid CIB (enum violation)"
     cmd="crm_simulate -x $TMPBAD -S"
     test_assert $CRM_EX_CONFIG 0
 
 
     desc="Try to make resulting CIB invalid (unrecognized validate-with)"
     cmd="cibadmin -M --xml-text '<cib validate-with=\"pacemaker-9999.0\"/>'"
     test_assert $CRM_EX_CONFIG
 
     sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
     desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
     cmd="crm_simulate -x $TMPBAD -S"
     test_assert $CRM_EX_CONFIG 0
 
 
     desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
     cmd="cibadmin -C -o configuration --xml-text '<tags/>'"
     test_assert $CRM_EX_CONFIG
 
     sed 's|</configuration>|<tags/></configuration>|' "$TMPGOOD" > "$TMPBAD"
     desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
     cmd="crm_simulate -x $TMPBAD -S"
     test_assert $CRM_EX_OK 0
 
 
     sed 's|[ 	][ 	]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
     desc="Make resulting CIB valid, although without validate-with attribute"
     cmd="cibadmin -R --xml-file $TMPBAD"
     test_assert $CRM_EX_OK
 
     desc="Run crm_simulate with valid CIB, but without validate-with attribute"
     cmd="crm_simulate -x $TMPBAD -S"
     test_assert $CRM_EX_OK 0
 
 
     # this will just disable validation and accept the config, outputting
     # validation errors
     sed -e 's|[ 	][ 	]*validate-with="[^"]*"||' \
         -e 's|\([ 	][ 	]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
         "$TMPGOOD" > "$TMPBAD"
     desc="Make resulting CIB invalid, and without validate-with attribute"
     cmd="cibadmin -R --xml-file $TMPBAD"
     test_assert $CRM_EX_OK
 
     desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
     cmd="crm_simulate -x $TMPBAD -S"
     test_assert $CRM_EX_OK 0
 
     unset CIB_shadow_dir
     rm -f "$TMPGOOD" "$TMPBAD"
 }
 
 test_upgrade() {
     local TMPXML
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     export CIB_shadow_dir="${shadow_dir}"
 
     $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-2.10 2>&1
     export CIB_shadow=$shadow
 
     desc="Set stonith-enabled=false"
     cmd="crm_attribute -n stonith-enabled -v false"
     test_assert $CRM_EX_OK
 
     cat <<EOF > "$TMPXML"
     <resources>
       <primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
         <operations>
           <op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
           <op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s">
             <instance_attributes id="mySmartFuse-inputpower-instanceparams">
               <nvpair id="mySmartFuse-inputpower-requires" name="requires" value="inputpower"/>
             </instance_attributes>
           </op>
           <op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s">
             <instance_attributes id="mySmartFuse-outputpower-instanceparams">
               <nvpair id="mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
             </instance_attributes>
           </op>
         </operations>
         <instance_attributes id="mySmartFuse-params">
           <nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
         </instance_attributes>
 	<!-- a bit hairy but valid -->
         <instance_attributes id-ref="mySmartFuse-outputpower-instanceparams"/>
       </primitive>
     </resources>
 EOF
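+    # The resource above deliberately sets "requires" as an op-level
+    # instance attribute (twice, with different values) plus an id-ref to
+    # one of those sets -- constructs that the 2.10.xsl upgrade transform
+    # must rewrite without losing the values, as verified below.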
 
     desc="Configure the initial resource"
     cmd="cibadmin -M -o resources --xml-file $TMPXML"
     test_assert $CRM_EX_OK
 
     desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
     cmd="cibadmin --upgrade --force -V -V"
     test_assert $CRM_EX_OK
 
     desc="Query a resource instance attribute (shall survive)"
     cmd="crm_resource -r mySmartFuse -g requires"
     test_assert $CRM_EX_OK
 
     unset CIB_shadow_dir
     rm -f "$TMPXML"
 }
 
 test_rules() {
     local TMPXML
 
     export CIB_shadow_dir="${shadow_dir}"
     $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
     export CIB_shadow=$shadow
 
     cibadmin -C -o resources   --xml-text '<primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy" />'
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-too-many-date-expressions" rsc="dummy">
   <rule id="cli-rule-too-many-date-expressions" score="INFINITY" boolean-op="or">
     <date_expression id="cli-date-expression-1" operation="gt" start="2020-01-01 01:00:00 -0500"/>
     <date_expression id="cli-date-expression-2" operation="lt" end="2019-01-01 01:00:00 -0500"/>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-prefer-dummy-expired" rsc="dummy">
   <rule id="cli-prefer-rule-dummy-expired" score="INFINITY">
     <date_expression id="cli-prefer-lifetime-end-dummy-expired" operation="lt" end="2019-01-01 12:00:00 -05:00"/>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     if [ "$(uname)" == "FreeBSD" ]; then
         tomorrow=$(date -v+1d +"%F %T %z")
     else
         tomorrow=$(date --date=tomorrow +"%F %T %z")
     fi
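+    # (BSD date spells "one day from now" as -v+1d, GNU date as
+    # --date=tomorrow; either way $tomorrow is a full timestamp with UTC
+    # offset for the not-yet-in-effect rule below.)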
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-prefer-dummy-not-yet" rsc="dummy">
   <rule id="cli-prefer-rule-dummy-not-yet" score="INFINITY">
     <date_expression id="cli-prefer-lifetime-end-dummy-not-yet" operation="gt" start="${tomorrow}"/>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-prefer-dummy-date_spec-only-years" rsc="dummy">
   <rule id="cli-prefer-rule-dummy-date_spec-only-years" score="INFINITY">
     <date_expression id="cli-prefer-dummy-date_spec-only-years-expr" operation="date_spec">
       <date_spec id="cli-prefer-dummy-date_spec-only-years-spec" years="2019"/>
     </date_expression>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-prefer-dummy-date_spec-without-years" rsc="dummy">
   <rule id="cli-prefer-rule-dummy-date_spec-without-years" score="INFINITY">
     <date_expression id="cli-prefer-dummy-date_spec-without-years-expr" operation="date_spec">
       <date_spec id="cli-prefer-dummy-date_spec-without-years-spec" hours="20" months="1,3,5,7"/>
     </date_expression>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-prefer-dummy-date_spec-years-moon" rsc="dummy">
   <rule id="cli-prefer-rule-dummy-date_spec-years-moon" score="INFINITY">
     <date_expression id="cli-prefer-dummy-date_spec-years-moon-expr" operation="date_spec">
       <date_spec id="cli-prefer-dummy-date_spec-years-moon-spec" years="2019" moon="1"/>
     </date_expression>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
     TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
     cat <<EOF > "$TMPXML"
 <rsc_location id="cli-no-date_expression" rsc="dummy">
   <rule id="cli-no-date_expression-rule" score="INFINITY">
     <expression id="ban-apache-expr" attribute="#uname" operation="eq" value="node3"/>
   </rule>
 </rsc_location>
 EOF
 
     cibadmin -C -o constraints -x "$TMPXML"
     rm -f "$TMPXML"
 
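+    # With the constraints in place, exercise crm_rule -c against each rule
+    # id; -d supplies an alternative "now" for the evaluation.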
     desc="Try to check a rule that doesn't exist"
     cmd="crm_rule -c -r blahblah"
     test_assert $CRM_EX_NOSUCH
 
     desc="Try to check a rule that has too many date_expressions"
     cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
     test_assert $CRM_EX_UNIMPLEMENT_FEATURE
 
     desc="Verify basic rule is expired"
     cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
     test_assert $CRM_EX_EXPIRED
 
     desc="Verify basic rule worked in the past"
     cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
     test_assert $CRM_EX_OK
 
     desc="Verify basic rule is not yet in effect"
     cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
     test_assert $CRM_EX_NOT_YET_IN_EFFECT
 
     desc="Verify date_spec rule with years has expired"
     cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
     test_assert $CRM_EX_EXPIRED
 
     desc="Verify date_spec rule with years is in effect"
     cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
     test_assert $CRM_EX_OK
 
     desc="Try to check a rule whose date_spec does not contain years="
     cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
     test_assert $CRM_EX_NOSUCH
 
     desc="Try to check a rule whose date_spec contains years= and moon="
     cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
     test_assert $CRM_EX_NOSUCH
 
     desc="Try to check a rule with no date_expression"
     cmd="crm_rule -c -r cli-no-date_expression-rule"
     test_assert $CRM_EX_UNIMPLEMENT_FEATURE
 
     unset CIB_shadow_dir
 }
 
 # Process command-line arguments
 while [ $# -gt 0 ]; do
     case "$1" in
         -t)
             tests="$2"
             shift 2
             ;;
         -V|--verbose)
             verbose=1
             shift
             ;;
         -v|--valgrind)
             export G_SLICE=always-malloc
             VALGRIND_CMD="valgrind $VALGRIND_OPTS"
             shift
             ;;
         -s)
             do_save=1
             shift
             ;;
         -p)
             export PATH="$2:$PATH"
-            shift
+            shift 2
             ;;
         --help)
             echo "$USAGE_TEXT"
             exit $CRM_EX_OK
             ;;
         *)
             echo "error: unknown option $1"
             echo
             echo "$USAGE_TEXT"
             exit $CRM_EX_USAGE
             ;;
     esac
 done
 
 for t in $tests; do
     case "$t" in
         dates) ;;
         tools) ;;
         acls) ;;
         validity) ;;
         upgrade) ;;
         rules) ;;
+        crm_mon) ;;
         *)
             echo "error: unknown test $t"
             echo
             echo "$USAGE_TEXT"
             exit $CRM_EX_USAGE
             ;;
     esac
 done
 
 # Check whether we're running from source directory
 SRCDIR=$(dirname $test_home)
 if [ -x "$SRCDIR/tools/crm_simulate" ]; then
     export PATH="$SRCDIR/tools:$PATH"
     echo "Using local binaries from: $SRCDIR/tools"
 
     if [ -x "$SRCDIR/xml" ]; then
         export PCMK_schema_directory="$SRCDIR/xml"
         echo "Using local schemas from: $PCMK_schema_directory"
     fi
 fi
 
 for t in $tests; do
     echo "Testing $t"
     TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
     eval TMPFILE_$t="$TMPFILE"
     test_$t > "$TMPFILE"
 
+    # last-run= and last-rc-change= are always numeric in the CIB.  However,
+    # for the crm_mon test we also need to compare against the XML output of
+    # the crm_mon program.  There, these are shown as human-readable strings
+    # (like the output of the `date` command).
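+    # For example (values illustrative), last-rc-change="1590000000" in the
+    # CIB and last-rc-change="Wed May 20 16:40:00 2020" from crm_mon must
+    # both be stripped, hence the [A-Za-z0-9: ] character class below.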
     sed -e 's/cib-last-written.*>/>/'\
-        -e 's/ last-run=\"[0-9]*\"//'\
+        -e 's/ last-run=\"[A-Za-z0-9: ]*\"//'\
+        -e 's/Last updated: .*/Last updated:/' \
+        -e 's/Last change: .*/Last change:/' \
+        -e 's/(version .*)/(version)/' \
+        -e 's/last_update time=\".*\"/last_update time=\"\"/' \
+        -e 's/last_change time=\".*\"/last_change time=\"\"/' \
+        -e 's/ version=\".*\" / version=\"\" /' \
+        -e 's/request=\".*crm_mon/request=\"crm_mon/' \
         -e 's/crm_feature_set="[^"]*" //'\
         -e 's/validate-with="[^"]*" //'\
         -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
         -e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
         -e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
         -e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
         -e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
-        -e 's/ last-rc-change=\"[0-9]*\"//'\
+        -e 's/ last-rc-change=\"[A-Za-z0-9: ]*\"//'\
         -e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
         -e 's/^Entity: line [0-9][0-9]*: //'\
         -e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
         -e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
         -e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
         -e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
         -e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
         "$TMPFILE" > "${TMPFILE}.$$"
     mv -- "${TMPFILE}.$$" "$TMPFILE"
 
     if [ $do_save -eq 1 ]; then
         cp "$TMPFILE" $test_home/cli/regression.$t.exp
     fi
 done
 
 rm -rf "${shadow_dir}"
     
 failed=0
 
 if [ $verbose -eq 1 ]; then
     echo -e "\n\nResults"
 fi
 for t in $tests; do
     eval TMPFILE="\$TMPFILE_$t"
     if [ $verbose -eq 1 ]; then
         diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
     else
         diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
     fi
     if [ $? -ne 0 ]; then
         failed=1
     fi
 done
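+# Two failure modes are tracked separately: num_errors (bumped by
+# test_assert on an unexpected exit code) is a hard failure, while failed
+# (set above when output differs from the checked-in .exp file) is only a
+# digest mismatch.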
 
 echo -e "\n\nSummary"
 for t in $tests; do
     eval TMPFILE="\$TMPFILE_$t"
     grep -e '^\*' "$TMPFILE"
 done
 
 if [ $num_errors -ne 0 ]; then
     echo "$num_errors tests failed; see output in:"
     for t in $tests; do
         eval TMPFILE="\$TMPFILE_$t"
         echo "    $TMPFILE"
     done
     exit $CRM_EX_ERROR
 
 elif [ $failed -eq 1 ]; then
     echo "$num_passed tests passed but output was unexpected; see output in:"
     for t in $tests; do
         eval TMPFILE="\$TMPFILE_$t"
         echo "    $TMPFILE"
     done
     exit $CRM_EX_DIGEST
 
 else
     echo $num_passed tests passed
     for t in $tests; do
         eval TMPFILE="\$TMPFILE_$t"
         rm -f "$TMPFILE"
     done
     crm_shadow --force --delete $shadow >/dev/null 2>&1
     exit $CRM_EX_OK
 fi
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index f30c75b478..d29b161da8 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,476 +1,478 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 #  include <string.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/output.h>
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) { was_processing_error = TRUE; crm_config_error = TRUE; crm_err(fmt); }
 #  define pe_warn(fmt...) { was_processing_warning = TRUE; crm_config_warning = TRUE; crm_warn(fmt); }
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 #  define pe_set_action_bit(action, bit) action->flags = crm_set_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit)
 #  define pe_clear_action_bit(action, bit) action->flags = crm_clear_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit)
 
 // Some warnings we don't want to print every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = 0x0001,
     pe_wo_restart_type  = 0x0002,
     pe_wo_role_after    = 0x0004,
     pe_wo_poweroff      = 0x0008,
     pe_wo_require_all   = 0x0010,
     pe_wo_order_score   = 0x0020,
     pe_wo_neg_threshold = 0x0040,
 };
 
 extern uint32_t pe_wo;
 
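+// pe_warn_once() emits its message only the first time a given pe_wo_bit
+// is seen (via crm_warn for pe_wo_blind, pe_warn otherwise), then records
+// the bit in pe_wo so later transitions stay quiet.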
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (is_not_set(pe_wo, pe_wo_bit)) {     \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             set_bit(pe_wo, pe_wo_bit);          \
         }                                       \
     } while (0);
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GListPtr node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
     int id;
     enum pe_ordering type;
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 typedef struct notify_data_s {
     GSList *keys;               // Environment variable name/value pairs
 
     const char *action;
 
     pe_action_t *pre;
     pe_action_t *post;
     pe_action_t *pre_done;
     pe_action_t *post_done;
 
     GListPtr active;            /* notify_entry_t*  */
     GListPtr inactive;          /* notify_entry_t*  */
     GListPtr start;             /* notify_entry_t*  */
     GListPtr stop;              /* notify_entry_t*  */
     GListPtr demote;            /* notify_entry_t*  */
     GListPtr promote;           /* notify_entry_t*  */
     GListPtr master;            /* notify_entry_t*  */
     GListPtr slave;             /* notify_entry_t*  */
     GHashTable *allowed_nodes;
 
 } notify_data_t;
 
 bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node);
 
 int pe__add_scores(int score1, int score2);
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(void);
 void verify_pe_options(GHashTable * options);
 
 void common_update_score(pe_resource_t * rsc, const char *id, int score);
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
                          , size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 int pe__ban_html(pcmk__output_t *out, va_list args);
 int pe__ban_text(pcmk__output_t *out, va_list args);
 int pe__ban_xml(pcmk__output_t *out, va_list args);
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_html(pcmk__output_t *out, va_list args);
 int pe__clone_text(pcmk__output_t *out, va_list args);
 int pe__cluster_counts_html(pcmk__output_t *out, va_list args);
 int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
 int pe__cluster_counts_xml(pcmk__output_t *out, va_list args);
 int pe__cluster_dc_html(pcmk__output_t *out, va_list args);
 int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
 int pe__cluster_dc_xml(pcmk__output_t *out, va_list args);
 int pe__cluster_maint_mode_html(pcmk__output_t *out, va_list args);
 int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
 int pe__cluster_options_html(pcmk__output_t *out, va_list args);
 int pe__cluster_options_log(pcmk__output_t *out, va_list args);
 int pe__cluster_options_text(pcmk__output_t *out, va_list args);
 int pe__cluster_options_xml(pcmk__output_t *out, va_list args);
 int pe__cluster_stack_html(pcmk__output_t *out, va_list args);
 int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
 int pe__cluster_stack_xml(pcmk__output_t *out, va_list args);
 int pe__cluster_summary(pcmk__output_t *out, va_list args);
 int pe__cluster_summary_html(pcmk__output_t *out, va_list args);
 int pe__cluster_times_html(pcmk__output_t *out, va_list args);
 int pe__cluster_times_xml(pcmk__output_t *out, va_list args);
 int pe__cluster_times_text(pcmk__output_t *out, va_list args);
 int pe__failed_action_text(pcmk__output_t *out, va_list args);
 int pe__failed_action_xml(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_html(pcmk__output_t *out, va_list args);
 int pe__group_text(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__node_attribute_html(pcmk__output_t *out, va_list args);
 int pe__node_attribute_text(pcmk__output_t *out, va_list args);
 int pe__node_attribute_xml(pcmk__output_t *out, va_list args);
 int pe__node_list_html(pcmk__output_t *out, va_list args);
 int pe__node_list_text(pcmk__output_t *out, va_list args);
 int pe__node_list_xml(pcmk__output_t *out, va_list args);
 int pe__op_history_text(pcmk__output_t *out, va_list args);
 int pe__op_history_xml(pcmk__output_t *out, va_list args);
 int pe__resource_history_text(pcmk__output_t *out, va_list args);
 int pe__resource_history_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 int pe__ticket_html(pcmk__output_t *out, va_list args);
 int pe__ticket_text(pcmk__output_t *out, va_list args);
 int pe__ticket_xml(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent,
                        pe_working_set_t * data_set);
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = 0x00,
     pe_fc_effective = 0x01, // don't count expired failures
     pe_fc_fillers   = 0x02, // if container, include filler failures in count
 };
 
 int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure,
                      uint32_t flags, xmlNode *xml_op,
                      pe_working_set_t *data_set);
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
                               unsigned int *count_all,
                               unsigned int *count_clean);
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return pe__find_active_on(rsc, NULL, NULL);
 }
 
 
 /* Binary like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(GList *list);
 
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 /* Printing functions for debug */
 extern void print_node(const char *pre_text, pe_node_t * node, gboolean details);
 extern void print_str_str(gpointer key, gpointer value, gpointer user_data);
 extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out);
 
 extern void dump_node_capacity(int level, const char *comment, pe_node_t * node);
 extern void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node);
 
 void pe__show_node_weights_as(const char *file, const char *function,
                               int line, bool to_log, pe_resource_t *rsc,
                               const char *comment, GHashTable *nodes);
 
 #define pe__show_node_weights(level, rsc, text, nodes)              \
         pe__show_node_weights_as(__FILE__, __FUNCTION__, __LINE__,  \
                                  (level), (rsc), (text), (nodes))
 
 /* Sorting functions */
 extern gint sort_rsc_priority(gconstpointer a, gconstpointer b);
 extern gint sort_rsc_index(gconstpointer a, gconstpointer b);
 
 extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key);
 
 extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node,
                                   gboolean optional, gboolean foo, pe_working_set_t * data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, data_set);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, data_set);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, data_set);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, data_set)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, data_set)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, data_set)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, data_set)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, data_set)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, data_set)
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 extern pe_action_t *find_first_action(GListPtr input, const char *uuid, const char *task,
                                       pe_node_t * on_node);
 extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name,
                                           gboolean allow_non_atomic);
 
 extern GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 extern GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
                               pe_working_set_t * data_set);
 
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role);
 
 extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id,
                                           pe_working_set_t * data_set);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
-// Resources for manipulating resource names
+// Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return FALSE;
 }
 
 int pe__target_rc_from_xml(xmlNode *xml_op);
 
 gint sort_node_uname(gconstpointer a, gconstpointer b);
 bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed.  */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
                                          pe_working_set_t * data_set);
 
 pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set);
 void trigger_unfencing(
     pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set);
 
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
 
 #define pe_action_required(action, reason, text) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, text, pe_action_optional, FALSE)
 #define pe_action_implies(action, reason, flag) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, NULL, flag, FALSE)
 
 void set_bit_recursive(pe_resource_t * rsc, unsigned long long flag);
 void clear_bit_recursive(pe_resource_t * rsc, unsigned long long flag);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
 bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml,
                                        const char *field);
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);
 const char *pe_node_attribute_raw(pe_node_t *node, const char *name);
 bool pe__is_universal_clone(pe_resource_t *rsc,
                             pe_working_set_t *data_set);
 void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
                          enum pe_check_parameters, pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
                                         enum pe_check_parameters,
                                         pe_working_set_t*));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 #define BOOL2STR(x) ((x) ? "true" : "false")
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
                                 GHashTable *node_hash, GHashTable *hash,
                                 const char *always_first, gboolean overwrite,
                                 pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GListPtr pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list);
 
+bool pcmk__rsc_is_filtered(pe_resource_t *rsc, GListPtr only_show);
+
 #endif
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 776f85aabe..6a72cc5835 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,1965 +1,1965 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <pe_status_private.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include "./variant.h"
 
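+// Compute the IPv4 address after last_ip: normally bump the final octet;
+// once it passes 253, carry into the third octet and restart at .1; give
+// up (NULL) when the third octet passes 253 or the address cannot be
+// parsed as a dotted quad (see the IPv6 @TODO below).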
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
-        /*@ TODO check for IPv6 */
+        /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
 
 static int
 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
             char *buffer, int max)
 {
     if(data->ip_range_start == NULL) {
         return 0;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 return snprintf(buffer, max, " --add-host=%s-%d:%s",
                                 data->prefix, replica->offset,
                                 replica->ipaddr);
             }
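+            // no break here: when data->add_host is unset, control falls
+            // through to the rkt-style --hosts-entry form below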
         case PE__CONTAINER_AGENT_RKT:
             return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                             replica->ipaddr, data->prefix, replica->offset);
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return 0;
 }
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
         }
         return TRUE;
     }
     return FALSE;
 }
 
 static bool
 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                    pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
             return FALSE;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return TRUE;
 }
 
 static bool
 create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_DOCKER_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this makes applications happy who need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
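+        // Build a -v argument per configured mount; per-replica subdirectory
+        // mounts are also collected into dbuffer, which becomes the agent's
+        // mount_points parameter below.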
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(safe_str_neq(data->container_network, "host")) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
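        /* At this point, buffer holds the complete run options. As an
         * illustrative example, a hypothetical bundle "mybundle" with an
         * ip-range-start and one source-dir-root mount might produce:
         *
         *   --restart=no -h mybundle-0 -e PCMK_stderr=1
         *   -e PCMK_remote_port=3121 -v /var/local/mybundle-0:/var/www/html
         *
         * with any port mappings and user-supplied launcher options appended.
         */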
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
             * We just want to know if the container is alive; we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
         * Arguably better to control this API access with ACLs like
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        }

         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
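
/* A sketch of the container primitive generated above, for a hypothetical
 * bundle named "mybundle" (illustrative only; most attributes are elided,
 * and IDs derive from the bundle prefix and replica offset):
 *
 *   <primitive id="mybundle-docker-0" class="ocf" provider="heartbeat"
 *              type="docker">
 *     <instance_attributes id="mybundle-attributes-0">
 *       <nvpair ... name="image" value="..."/>
 *       <nvpair ... name="run_opts" value="--restart=no ..."/>
 *     </instance_attributes>
 *     <operations>
 *       <op ... name="monitor" interval="60s"/>
 *     </operations>
 *   </primitive>
 */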
 
 static bool
 create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_PODMAN_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         // FIXME: (bandini 2018-08) podman has no restart policies
         //offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
         * name for logs, but this makes applications happy that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             // podman has no support for --link-local-ip
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(safe_str_neq(data->container_network, "host")) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
             * We just want to know if the container is alive; we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
         * Arguably better to control this API access with ACLs like
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        }

         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent,
                            data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
 static bool
 create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                     pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         int volid = 0;
 
         id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_RKT_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
         * name for logs, but this makes applications happy that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
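        /* rkt expresses mounts as paired flags: "--volume volN,kind=host,..."
         * defines a host volume, and "--mount volume=volN,target=..." maps it
         * into the container. volid generates the unique volume names.
         */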
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
             }
             volid++;
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset,
                                    " --port=%s:%s:%s", port->target,
                                    replica->ipaddr, port->source);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
             * We just want to know if the container is alive; we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
         * Arguably better to control this API access with ACLs like
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        }

         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pe_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         GListPtr child;
 
         for (child = rsc->children; child != NULL; child = child->next) {
             disallow_node((pe_resource_t *) (child->data), uname);
         }
     }
 }
 
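/*!
 * \internal
 * \brief Create the implicit remote connection resource for a bundle replica
 *
 * \param[in,out] parent    Top-level bundle resource
 * \param[in]     data      Bundle variant data
 * \param[in,out] replica   Replica to create the connection for
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success (including when no connection is needed), else FALSE
 */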
 static bool
 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         GListPtr rsc_iter = NULL;
         pe_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(data_set->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             //@TODO return false instead of asserting?
             CRM_ASSERT(pe_find_resource(data_set->resources, id) == NULL);
         }
 
        /* REMOTE_CONTAINER_HACK: When the connection does not have its own
         * IP, we use the magic string "#uname" as the server name, to
         * support nested remotes (i.e. a bundle running on a remote node).
         */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = crm_itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
          * knows that the bundle node is fenced by recovering the container, and
         * that the remote connection should be ordered relative to the
         * container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(data_set->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   data_set);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when common_unpack() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
             disallow_node((pe_resource_t *) (rsc_iter->data), uname);
         }
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
                                                               g_str_equal,
                                                               NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pe_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
             return FALSE;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return TRUE;
 }
 
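/*!
 * \internal
 * \brief Create all implicit resources needed by a bundle replica
 *
 * This creates the replica's container resource (according to the bundle's
 * agent type), plus its IP address and remote connection resources, if any.
 *
 * \param[in,out] parent    Top-level bundle resource
 * \param[in]     data      Bundle variant data
 * \param[in,out] replica   Replica to create resources for
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, otherwise FALSE
 */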
 static bool
 create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
             if (!create_docker_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_PODMAN:
             if (!create_podman_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             if (!create_rkt_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             return FALSE;
     }
 
     if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if (replica->child && replica->ipaddr) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes);
     }
 
     return TRUE;
 }
 
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     mount->source = strdup(source);
     mount->target = strdup(target);
     if (options) {
         mount->options = strdup(options);
     }
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
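/*!
 * \internal
 * \brief Find the bundle replica that a remote connection resource belongs to
 *
 * \param[in] remote  Remote connection resource to search for
 *
 * \return Replica whose connection is \p remote, or NULL if none is found
 */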
 static pe__bundle_replica_t *
 replica_for_remote(pe_resource_t *remote)
 {
     pe_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
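/*!
 * \internal
 * \brief Check whether a resource is a bundle connection that uses "#uname"
 *
 * See the REMOTE_CONTAINER_HACK comments: a replica's connection uses the
 * magic "#uname" address when the replica has no IP of its own.
 *
 * \param[in] rsc  Resource to check
 *
 * \return TRUE if \p rsc is an ocf:pacemaker:remote connection whose address
 *         parameter is "#uname", otherwise FALSE
 */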
 bool
 pe__bundle_needs_remote_name(pe_resource_t *rsc)
 {
     const char *value;
 
     if (rsc == NULL) {
         return FALSE;
     }
 
     value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
     if (safe_str_eq(value, "#uname") == FALSE) {
         return FALSE;
 
     } else {
         const char *match[3][2] = {
             { XML_ATTR_TYPE,           "remote"                },
             { XML_AGENT_ATTR_CLASS,    PCMK_RESOURCE_CLASS_OCF },
             { XML_AGENT_ATTR_PROVIDER, "pacemaker"             },
         };
 
         for (int m = 0; m < 3; m++) {
             value = crm_element_value(rsc->xml, match[m][0]);
             if (safe_str_neq(value, match[m][1])) {
                 return FALSE;
             }
         }
     }
     return TRUE;
 }
 
 const char *
 pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
 {
    /* REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with
     * pacemaker-remoted inside
     */
 
     pe_node_t *node = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pe__bundle_needs_remote_name(rsc)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, node->details->uname);
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
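/*!
 * \internal
 * \brief Unpack a bundle resource's configuration into its variant data
 *
 * \param[in,out] rsc       Bundle resource to unpack
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, otherwise FALSE
 */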
 gboolean
 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
 
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     bundle_data->promoted_max = crm_parse_int(value, "0");
     if (bundle_data->promoted_max < 0) {
         pe_err("%s for %s must be nonnegative integer, using 0",
                XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
         bundle_data->promoted_max = 0;
     }
 
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && bundle_data->promoted_max) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         bundle_data->nreplicas = crm_parse_int(value, "1");
     }
     if (bundle_data->nreplicas < 1) {
         pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
         bundle_data->nreplicas = 1;
     }
 
    /*
     * Communication between containers on the same host via the
     * floating IPs only works if the docker daemon is started with:
     *   --userland-proxy=false --ip-masq=false
     */
     value = crm_element_value(xml_obj, "replicas-per-host");
     bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
     if (bundle_data->nreplicas_per_host < 1) {
         pe_err("'replicas-per-host' for %s must be positive integer, using 1",
                rsc->id);
         bundle_data->nreplicas_per_host = 1;
     }
     if (bundle_data->nreplicas_per_host == 1) {
         clear_bit(rsc->flags, pe_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
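        /* Each child here is a <port-mapping>, which specifies either a
         * single "port" (optionally mapped to a different "internal-port"
         * inside the container) or a "range" of ports used as-is.
         */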
         for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
              xml_child = __xml_next_element(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
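    /* Each <storage-mapping> child defines a bind mount. Specifying
     * "source-dir-root" instead of "source-dir" means each replica gets its
     * own subdirectory beneath the given root.
     */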
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
          xml_child = __xml_next_element(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             set_bit(flags, pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = crm_itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = crm_itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                 (bundle_data->nreplicas_per_host > 1)?
                 XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = crm_itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GListPtr childIter = NULL;
         pe_resource_t *new_rsc = NULL;
         pe__bundle_port_t *port = NULL;
 
         int offset = 0, max = 1024;
         char *buffer = NULL;
 
         if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", ID(rsc->xml));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
             return FALSE;
         }
 
         bundle_data->child = new_rsc;
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
         * - the actual connection will do extra checking to make sure the key
         *   file exists and is readable, which we can't do here on the DC;
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = crm_itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = calloc(1, max+1);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
            /* Ensure the child's notify flag is set based on the underlying
             * primitive's value
             */
             if (is_set(replica->child->flags, pe_rsc_notify)) {
                 set_bit(bundle_data->child->flags, pe_rsc_notify);
             }
 
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
             bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                 XML_RSC_ATTR_TARGET);
         }
         bundle_data->container_host_options = buffer;
         if (bundle_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
                                  strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         int offset = 0, max = 1024;
         char *buffer = calloc(1, max+1);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->offset = lpc;
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = buffer;
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (!create_container(rsc, bundle_data, replica, data_set)) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
 
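/*!
 * \internal
 * \brief Check whether one replica member decides a bundle's active status
 *
 * \param[in] rsc  Replica member to check (may be NULL)
 * \param[in] all  Whether all members must be active for the bundle to count
 *                 as active
 *
 * \return TRUE if \p rsc is active and any active member suffices, FALSE if
 *         \p rsc is inactive and all members must be active, otherwise -1
 *         (meaning this member does not decide the result)
 */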
 static int
 replica_resource_active(pe_resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
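/*!
 * \internal
 * \brief Check whether a bundle is active
 *
 * \param[in] rsc  Bundle resource to check
 * \param[in] all  Whether all replica members must be active
 *
 * \return TRUE if \p rsc is active according to \p all, otherwise FALSE
 */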
 gboolean
 pe__bundle_active(pe_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GListPtr iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Bundle replica if found, NULL otherwise
  */
 pe_resource_t *
 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 static void
 print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
 static void
 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
     status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
     status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica id=\"%d\">\n", pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         char *id = crm_itoa(replica->offset);
 
         CRM_ASSERT(replica);
 
-        if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
+        if (pcmk__rsc_is_filtered(replica->container, only_show)) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6
                          , "id", rsc->id
                          , "type", container_agent_str(bundle_data->agent_type)
                          , "image", bundle_data->image
                          , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
                          , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
                          , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (replica->ip != NULL) {
             out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
         }
 
         if (replica->child != NULL) {
             out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
         }
 
         out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
 
         if (replica->remote != NULL) {
             out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                long options)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, options);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     char buffer[LINE_MAX];
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     pcmk__output_create_xml_node(out, "br");
     out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
                     (bundle_data->nreplicas > 1)? " set" : "",
                     rsc->id, bundle_data->image,
                     is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
 
-        if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
+        if (pcmk__rsc_is_filtered(replica->container, only_show)) {
             continue;
         }
 
         pcmk__output_xml_create_parent(out, "li");
         if (is_set(options, pe_print_implicit)) {
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
                 xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
             }
             pcmk__output_create_xml_node(out, "br");
             out->begin_list(out, NULL, NULL, NULL);
 
             if (replica->ip != NULL) {
                 out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
             }
 
             if (replica->child != NULL) {
                 out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
             }
 
             out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
 
             if (replica->remote != NULL) {
                 out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
             }
 
             out->end_list(out);
         } else {
             pe__bundle_replica_output_html(out, replica, options);
         }
 
         pcmk__output_xml_pop_parent(out);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                long options)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, options);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
                     (bundle_data->nreplicas > 1)? " set" : "",
                     rsc->id, bundle_data->image,
                     is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
 
-        if (!pe__rsc_running_on_any_node_in_list(replica->container, only_show)) {
+        if (pcmk__rsc_is_filtered(replica->container, only_show)) {
             continue;
         }
 
         if (is_set(options, pe_print_implicit)) {
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (replica->ip != NULL) {
                 out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_show);
             }
 
             if (replica->child != NULL) {
                 out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_show);
             }
 
             out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_show);
 
             if (replica->remote != NULL) {
                 out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_show);
             }
 
             out->end_list(out);
         } else {
             pe__bundle_replica_output_text(out, replica, options);
         }
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static void
 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 void
 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }

     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 static void
 free_bundle_replica(pe__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if(bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
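
 /* Note on the variant method below: bundles do not derive an aggregate role
  * from their replicas, so pe__bundle_resource_state() always reports
  * RSC_ROLE_UNKNOWN (presumably because the member resources report their
  * own roles individually).
  */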
 
 enum rsc_role_e
 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pe_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
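
 /* Illustrative usage sketch (not part of this patch): since the function
  * returns 0 for anything that is not a bundle, callers need not check the
  * resource variant first.
  */
 #if 0
 static void
 example_log_replicas(const pe_resource_t *rsc)
 {
     int nreplicas = pe_bundle_replicas(rsc);

     if (nreplicas > 0) {
         crm_trace("%s is a bundle with %d configured replicas",
                   rsc->id, nreplicas);
     }
 }
 #endif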
 
 void
 pe__count_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pe__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 8292d9087a..83c58288fa 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1107 +1,1107 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 #include <crm/msg_xml.h>
 
 #define VARIANT_CLONE 1
 #include "./variant.h"
 
 void
 pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
                pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
 
         pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
                 "such as %s can be used only as anonymous clones",
                 rsc->id, standard, rid);
 
         clone_data->clone_node_max = 1;
         clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                        g_list_length(data_set->nodes));
     }
 }
 
 pe_resource_t *
 find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
 {
     char *child_id = NULL;
     pe_resource_t *child = NULL;
     const char *child_base = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     child_base = ID(clone_data->xml_obj_child);
     child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
     child = pe_find_resource(rsc->children, child_id);
 
     free(child_id);
     return child;
 }
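
 /* Illustrative usage sketch (not part of this patch): instance IDs are the
  * child ID plus ":<number>", so this would look up e.g. "ping:0" for a
  * clone of "ping".
  */
 #if 0
 static pe_resource_t *
 example_find_first_instance(pe_resource_t *clone, pe_working_set_t *data_set)
 {
     return find_clone_instance(clone, "0", data_set);
 }
 #endif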
 
 pe_resource_t *
 pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     gboolean as_orphan = FALSE;
     char *inc_num = NULL;
     char *inc_max = NULL;
     pe_resource_t *child_rsc = NULL;
     xmlNode *child_copy = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);
 
     if (clone_data->total_clones >= clone_data->clone_max) {
         // If we've already used all available instances, this is an orphan
         as_orphan = TRUE;
     }
 
     // Allocate instance numbers in numerical order (starting at 0)
     inc_num = crm_itoa(clone_data->total_clones);
     inc_max = crm_itoa(clone_data->clone_max);
 
     child_copy = copy_xml(clone_data->xml_obj_child);
 
     crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
 
     if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
         pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
         child_rsc = NULL;
         goto bail;
     }
 /*  child_rsc->globally_unique = rsc->globally_unique; */
 
     CRM_ASSERT(child_rsc);
     clone_data->total_clones += 1;
     pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
     rsc->children = g_list_append(rsc->children, child_rsc);
     if (as_orphan) {
         set_bit_recursive(child_rsc, pe_rsc_orphan);
     }
 
     add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
     pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
 
   bail:
     free(inc_num);
     free(inc_max);
 
     return child_rsc;
 }
 
 gboolean
 clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     int lpc = 0;
     xmlNode *a_child = NULL;
     xmlNode *xml_obj = rsc->xml;
     clone_variant_data_t *clone_data = NULL;
 
     const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
     const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     clone_data = calloc(1, sizeof(clone_variant_data_t));
     rsc->variant_opaque = clone_data;
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         const char *promoted_max = NULL;
         const char *promoted_node_max = NULL;
 
         promoted_max = g_hash_table_lookup(rsc->meta,
                                            XML_RSC_ATTR_PROMOTED_MAX);
         if (promoted_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_max = g_hash_table_lookup(rsc->meta,
                                                XML_RSC_ATTR_MASTER_MAX);
         }
 
         promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                 XML_RSC_ATTR_PROMOTED_NODEMAX);
         if (promoted_node_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                     XML_RSC_ATTR_MASTER_NODEMAX);
         }
 
         clone_data->promoted_max = crm_parse_int(promoted_max, "1");
         clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
     }
 
     // Implied by calloc()
     /* clone_data->xml_obj_child = NULL; */
 
     clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
 
     if (max_clones) {
         clone_data->clone_max = crm_parse_int(max_clones, "1");
 
     } else if (pcmk__list_of_multiple(data_set->nodes)) {
         clone_data->clone_max = g_list_length(data_set->nodes);
 
     } else {
         clone_data->clone_max = 1;      /* Handy during crm_verify */
     }
 
     clone_data->ordered = crm_is_true(ordered);
 
     if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
         pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
                          "because anonymous clones support only one instance "
                          "per node", rsc->id);
         clone_data->clone_node_max = 1;
     }
 
     pe_rsc_trace(rsc, "Options for %s", rsc->id);
     pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
     pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
     pe_rsc_trace(rsc, "\tClone is unique: %s",
                  is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
     pe_rsc_trace(rsc, "\tClone is promotable: %s",
                  is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false");
 
     // Clones may contain a single group or primitive
     for (a_child = __xml_first_child_element(xml_obj); a_child != NULL;
          a_child = __xml_next_element(a_child)) {
 
         if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
         || crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) {
             clone_data->xml_obj_child = a_child;
             break;
         }
     }
 
     if (clone_data->xml_obj_child == NULL) {
         pcmk__config_err("%s has nothing to clone", rsc->id);
         return FALSE;
     }
 
     /*
      * Make clones ever so slightly sticky by default
      *
      * This helps ensure clone instances are not shuffled around the cluster
      * for no benefit in situations when pre-allocation is not appropriate
      */
     if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
         add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
     }
 
     /* This ensures that the globally-unique value always exists for children to
      * inherit when being unpacked, as well as in resource agents' environment.
      */
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                    is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
 
     if (clone_data->clone_max <= 0) {
         /* Create one child instance so that unpack_find_resource() will hook
          * any orphans up to the parent correctly.
          */
         if (pe__create_clone_child(rsc, data_set) == NULL) {
             return FALSE;
         }
 
     } else {
         // Create a child instance for each available instance number
         for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
             if (pe__create_clone_child(rsc, data_set) == NULL) {
                 return FALSE;
             }
         }
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
     return TRUE;
 }
 
 gboolean
 clone_active(pe_resource_t * rsc, gboolean all)
 {
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean child_active = child_rsc->fns->active(child_rsc, all);
 
         if (all == FALSE && child_active) {
             return TRUE;
         } else if (all && child_active == FALSE) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     } else {
         return FALSE;
     }
 }
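
 /* Illustrative usage sketch (not part of this patch): the second argument
  * selects between "any instance active" and "every instance active".
  */
 #if 0
 static void
 example_check_activity(pe_resource_t *clone)
 {
     gboolean any_active = clone_active(clone, FALSE);
     gboolean all_active = clone_active(clone, TRUE);

     crm_trace("%s: any active=%d, all active=%d",
               clone->id, any_active, all_active);
 }
 #endif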
 
 static void
 short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
 {
     if(suffix == NULL) {
         suffix = "";
     }
 
     if (list) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         status_print("%s%s: [%s ]%s", prefix, type, list, suffix);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
 
         } else if (options & pe_print_suppres_nl) {
             /* nothing */
         } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
             status_print("\n");
         }
 
     }
 }
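
 /* For example, with prefix "  ", type "Started" and list " node1 node2"
  * (pcmk__add_word() prepends the separator), short_print() emits a line of
  * the form "  Started: [ node1 node2 ]".
  */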
 
 static const char *
 configured_role_str(pe_resource_t * rsc)
 {
     const char *target_role = g_hash_table_lookup(rsc->meta,
                                                   XML_RSC_ATTR_TARGET_ROLE);
 
     if ((target_role == NULL) && rsc->children && rsc->children->data) {
         target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
                                           XML_RSC_ATTR_TARGET_ROLE);
     }
     return target_role;
 }
 
 static enum rsc_role_e
 configured_role(pe_resource_t * rsc)
 {
     const char *target_role = configured_role_str(rsc);
 
     if (target_role) {
         return text2role(target_role);
     }
     return RSC_ROLE_UNKNOWN;
 }
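
 /* Illustrative usage sketch (not part of this patch): a clone whose
  * target-role meta-attribute is "Stopped" is effectively disabled.
  */
 #if 0
 static gboolean
 example_is_disabled(pe_resource_t *rsc)
 {
     return configured_role(rsc) == RSC_ROLE_STOPPED;
 }
 #endif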
 
 static void
 clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = crm_strdup_printf("%s    ", pre_text);
     const char *target_role = configured_role_str(rsc);
     GListPtr gIter = rsc->children;
 
     status_print("%s<clone ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false");
     status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
     status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
     status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
     status_print("failure_ignored=\"%s\" ",
                  is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</clone>\n", pre_text);
     free(child_text);
 }
 
 bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
 {
     GListPtr gIter;
     bool all = !any;
 
     if(is_set(rsc->flags, flag)) {
         if(any) {
             return TRUE;
         }
     } else if(all) {
         return FALSE;
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         if(is_set_recursive(gIter->data, flag, any)) {
             if(any) {
                 return TRUE;
             }
 
         } else if(all) {
             return FALSE;
         }
     }
 
     if(all) {
         return TRUE;
     }
     return FALSE;
 }
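
 /* Illustrative usage sketch (not part of this patch): with any=FALSE the
  * flag must be set on the resource and every descendant; with any=TRUE one
  * match anywhere in the tree suffices.
  */
 #if 0
 static gboolean
 example_fully_managed(pe_resource_t *rsc)
 {
     return is_set_recursive(rsc, pe_rsc_managed, FALSE);
 }
 #endif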
 
 void
 clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *list_text = NULL;
     char *child_text = NULL;
     char *stopped_list = NULL;
 
     GListPtr master_list = NULL;
     GListPtr started_list = NULL;
     GListPtr gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         clone_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_clone_variant_data(clone_data, rsc);
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sClone Set: %s [%s]%s%s%s",
                  pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
                  is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                 && is_not_set(options, pe_print_clone_active)) {
                 stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* Group members are not on a single node (uncolocated group) */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     short_print(list_text, child_text, "Masters", NULL, options, print_data);
     g_list_free(master_list);
     free(list_text);
     list_text = NULL;
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         enum rsc_role_e role = configured_role(rsc);
 
         if(role == RSC_ROLE_SLAVE) {
             short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
         } else {
             short_print(list_text, child_text, "Slaves", NULL, options, print_data);
         }
 
     } else {
         short_print(list_text, child_text, "Started", NULL, options, print_data);
     }
 
     g_list_free(started_list);
     free(list_text);
     list_text = NULL;
 
     if (is_not_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (is_not_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GListPtr nIter;
             GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list); stopped_list = NULL;
 
             if (list == NULL) {
                 /* Clusters with symmetric-cluster=false haven't calculated
                  * allowed_nodes yet. If we've not probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                     stopped_list = pcmk__add_word(stopped_list,
                                                   node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         short_print(stopped_list, child_text, state, NULL, options, print_data);
         free(stopped_list);
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__clone_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     GListPtr gIter = rsc->children;
 
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
-        if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
+        if (pcmk__rsc_is_filtered(child_rsc, only_show)) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "clone", 7
                      , "id", rsc->id
                      , "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable))
                      , "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
                      , "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
                      , "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
                      , "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
                      , "target_role", configured_role_str(rsc));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__clone_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     char *list_text = NULL;
     char *stopped_list = NULL;
 
     GListPtr master_list = NULL;
     GListPtr started_list = NULL;
     GListPtr gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     get_clone_variant_data(clone_data, rsc);
 
     out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
                     rsc->id, ID(clone_data->xml_obj_child),
                     is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                     is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
-        if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
+        if (pcmk__rsc_is_filtered(child_rsc, only_show)) {
             continue;
         }
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                 && is_not_set(options, pe_print_clone_active)) {
                 stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* Group members are not on a single node (uncolocated group) */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
         }
     }
 
     if (is_set(options, pe_print_clone_details)) {
         free(stopped_list);
         out->end_list(out);
         return pcmk_rc_ok;
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_show, host->details->uname)) {
             continue;
         }
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         out->list_item(out, NULL, " Masters: [%s ]", list_text);
         g_list_free(master_list);
         free(list_text);
         list_text = NULL;
     }
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_show, host->details->uname)) {
             continue;
         }
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         if (is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if(role == RSC_ROLE_SLAVE) {
                 out->list_item(out, NULL, " Slaves (target-role): [%s ]", list_text);
             } else {
                 out->list_item(out, NULL, " Slaves: [%s ]", list_text);
             }
 
         } else {
             out->list_item(out, NULL, " Started: [%s ]", list_text);
         }
 
         g_list_free(started_list);
         free(list_text);
         list_text = NULL;
     }
 
     if (is_not_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (is_not_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GListPtr nIter;
             GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
 
             if (list == NULL) {
                 /* Clusters with symmetric-cluster=false haven't calculated
                  * allowed_nodes yet. If we've not probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(only_show, node->details->uname)) {
                     stopped_list = pcmk__add_word(stopped_list,
                                                   node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
             out->list_item(out, NULL, " %s: [%s ]", state, stopped_list);
             free(stopped_list);
         }
     }
 
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("clone", "unsigned int", "struct pe_resource_t *", "GListPtr")
 int
 pe__clone_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GListPtr only_show = va_arg(args, GListPtr);
 
     char *list_text = NULL;
     char *stopped_list = NULL;
 
     GListPtr master_list = NULL;
     GListPtr started_list = NULL;
     GListPtr gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     get_clone_variant_data(clone_data, rsc);
 
     out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
                     rsc->id, ID(clone_data->xml_obj_child),
                     is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                     is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
-        if (!pe__rsc_running_on_any_node_in_list(child_rsc, only_show)) {
+        if (pcmk__rsc_is_filtered(child_rsc, only_show)) {
             continue;
         }
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                 && is_not_set(options, pe_print_clone_active)) {
                 stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* Group members are not on a single node (uncolocated group) */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_show);
         }
     }
 
     if (is_set(options, pe_print_clone_details)) {
         free(stopped_list);
         out->end_list(out);
         return pcmk_rc_ok;
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_show, host->details->uname)) {
             continue;
         }
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         out->list_item(out, "Masters", "[%s ]", list_text);
         g_list_free(master_list);
         free(list_text);
         list_text = NULL;
     }
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_show, host->details->uname)) {
             continue;
         }
 
         list_text = pcmk__add_word(list_text, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         if (is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if(role == RSC_ROLE_SLAVE) {
                 out->list_item(out, "Slaves (target-role)", "[%s ]", list_text);
             } else {
                 out->list_item(out, "Slaves", "[%s ]", list_text);
             }
         } else {
             out->list_item(out, "Started", "[%s ]", list_text);
         }
 
         g_list_free(started_list);
         free(list_text);
         list_text = NULL;
     }
 
     if (is_not_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (is_not_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GListPtr nIter;
             GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
 
             if (list == NULL) {
                 /* Clusters with symmetric-cluster=false haven't calculated
                  * allowed_nodes yet. If we've not probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(only_show, node->details->uname)) {
                     stopped_list = pcmk__add_word(stopped_list,
                                                   node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
             out->list_item(out, state, "[%s ]", stopped_list);
             free(stopped_list);
         }
     }
 
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
 void
 clone_free(pe_resource_t * rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         free_xml(child_rsc->xml);
         child_rsc->xml = NULL;
         /* There could be a saved unexpanded xml */
         free_xml(child_rsc->orig_xml);
         child_rsc->orig_xml = NULL;
         child_rsc->fns->free(child_rsc);
     }
 
     g_list_free(rsc->children);
 
     if (clone_data) {
         CRM_ASSERT(clone_data->demote_notify == NULL);
         CRM_ASSERT(clone_data->stop_notify == NULL);
         CRM_ASSERT(clone_data->start_notify == NULL);
         CRM_ASSERT(clone_data->promote_notify == NULL);
     }
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 clone_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
 
         if (a_role > clone_role) {
             clone_role = a_role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
     return clone_role;
 }
 
 /*!
  * \internal
  * \brief Check whether a clone has an instance for every node
  *
  * \param[in] rsc       Clone to check
  * \param[in] data_set  Cluster state
  *
  * \return TRUE if the clone has one instance configured for every cluster
  *         node, otherwise FALSE
  */
 bool
 pe__is_universal_clone(pe_resource_t *rsc,
                        pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
         if (clone_data->clone_max == g_list_length(data_set->nodes)) {
             return TRUE;
         }
     }
     return FALSE;
 }
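
 /* Illustrative usage sketch (not part of this patch): a clone created
  * without an explicit clone-max defaults to one instance per cluster node
  * (see clone_unpack()), which is exactly what this predicate detects.
  */
 #if 0
 static void
 example_universal(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     if (pe__is_universal_clone(rsc, data_set)) {
         crm_trace("%s has an instance for every node", rsc->id);
     }
 }
 #endif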
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 6ff1ee5c25..b18b7e4726 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2778 +1,2769 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 
 #include <ctype.h>
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 
 extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
 void print_str_str(gpointer key, gpointer value, gpointer user_data);
 gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
 void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
                       pe_working_set_t * data_set);
 static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
                                          gboolean include_disabled);
 
 #if ENABLE_VERSIONED_ATTRS
 pe_rsc_action_details_t *
 pe_rsc_action_details(pe_action_t *action)
 {
     pe_rsc_action_details_t *details;
 
     CRM_CHECK(action != NULL, return NULL);
 
     if (action->action_details == NULL) {
         action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
         CRM_CHECK(action->action_details != NULL, return NULL);
     }
 
     details = (pe_rsc_action_details_t *) action->action_details;
     if (details->versioned_parameters == NULL) {
         details->versioned_parameters = create_xml_node(NULL,
                                                         XML_TAG_OP_VER_ATTRS);
     }
     if (details->versioned_meta == NULL) {
         details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
     }
     return details;
 }
 
 static void
 pe_free_rsc_action_details(pe_action_t *action)
 {
     pe_rsc_action_details_t *details;
 
     if ((action == NULL) || (action->action_details == NULL)) {
         return;
     }
 
     details = (pe_rsc_action_details_t *) action->action_details;
 
     if (details->versioned_parameters) {
         free_xml(details->versioned_parameters);
     }
     if (details->versioned_meta) {
         free_xml(details->versioned_meta);
     }
 
     action->action_details = NULL;
 }
 #endif
 
 /*!
  * \internal
  * \brief Check whether we can fence a particular node
  *
  * \param[in] data_set  Working set for cluster
  * \param[in] node      Name of node to check
  *
  * \return true if node can be fenced, false otherwise
  */
 bool
 pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
 {
     if (pe__is_guest_node(node)) {
         /* Guest nodes are fenced by stopping their container resource. We can
          * do that if the container's host is either online or fenceable.
          */
         pe_resource_t *rsc = node->details->remote_rsc->container;
 
         for (GList *n = rsc->running_on; n != NULL; n = n->next) {
             pe_node_t *container_node = n->data;
 
             if (!container_node->details->online
                 && !pe_can_fence(data_set, container_node)) {
                 return false;
             }
         }
         return true;
 
     } else if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
         return false; /* Turned off */
 
     } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) {
         return false; /* No devices */
 
     } else if (is_set(data_set->flags, pe_flag_have_quorum)) {
         return true;
 
     } else if (data_set->no_quorum_policy == no_quorum_ignore) {
         return true;
 
     } else if(node == NULL) {
         return false;
 
     } else if(node->details->online) {
         crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
         return true;
     }
 
     crm_trace("Cannot fence %s", node->details->uname);
     return false;
 }
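
 /* Illustrative usage sketch (not part of this patch): callers typically
  * gate fencing decisions on this check.
  */
 #if 0
 static void
 example_maybe_fence(pe_working_set_t *data_set, pe_node_t *node)
 {
     if (pe_can_fence(data_set, node)) {
         crm_trace("Fencing of %s is possible", node->details->uname);
     }
 }
 #endif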
 
 /*!
  * \internal
  * \brief Copy a node object
  *
  * \param[in] this_node  Node object to copy
  *
  * \return Newly allocated shallow copy of this_node
  * \note This function asserts on errors and is guaranteed to return non-NULL.
  */
 pe_node_t *
 pe__copy_node(const pe_node_t *this_node)
 {
     pe_node_t *new_node = NULL;
 
     CRM_ASSERT(this_node != NULL);
 
     new_node = calloc(1, sizeof(pe_node_t));
     CRM_ASSERT(new_node != NULL);
 
     new_node->rsc_discover_mode = this_node->rsc_discover_mode;
     new_node->weight = this_node->weight;
     new_node->fixed = this_node->fixed;
     new_node->details = this_node->details;
 
     return new_node;
 }
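
 /* Illustrative usage sketch (not part of this patch): the copy is shallow,
  * so only the copy itself is freed; "details" stays shared with the
  * original node.
  */
 #if 0
 static void
 example_copy_node(const pe_node_t *node)
 {
     pe_node_t *copy = pe__copy_node(node);

     copy->weight = 0;   // does not affect the original node
     free(copy);         // never free copy->details
 }
 #endif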
 
 /* Any node found in either the hash table or the list but not in both gets a
  * score of -INFINITY
  */
 void
 node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
 {
     GHashTable *result = hash;
     pe_node_t *other_node = NULL;
     GListPtr gIter = list;
 
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
 
         other_node = pe_find_node_id(list, node->details->id);
         if (other_node == NULL) {
             node->weight = -INFINITY;
         } else if (merge_scores) {
             node->weight = pe__add_scores(node->weight, other_node->weight);
         }
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         other_node = pe_hash_table_lookup(result, node->details->id);
 
         if (other_node == NULL) {
             pe_node_t *new_node = pe__copy_node(node);
 
             new_node->weight = -INFINITY;
             g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create a node hash table from a node list
  *
  * \param[in] list  Node list
  *
  * \return Hash table equivalent of node list
  */
 GHashTable *
 pe__node_list2table(GList *list)
 {
     GHashTable *result = NULL;
 
     result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data);
 
         g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
     }
     return result;
 }
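
 /* Illustrative usage sketch (not part of this patch): the table owns its
  * copied node values (keys are the shared details->id strings), so a plain
  * g_hash_table_destroy() releases everything the table allocated.
  */
 #if 0
 static void
 example_node_table(GList *node_list)
 {
     GHashTable *table = pe__node_list2table(node_list);

     /* ... look up nodes by details->id ... */
     g_hash_table_destroy(table);
 }
 #endif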
 
 gint
 sort_node_uname(gconstpointer a, gconstpointer b)
 {
     const char *name_a = ((const pe_node_t *) a)->details->uname;
     const char *name_b = ((const pe_node_t *) b)->details->uname;
 
     while (*name_a && *name_b) {
         if (isdigit(*name_a) && isdigit(*name_b)) {
             // If node names contain a number, sort numerically
 
             char *end_a = NULL;
             char *end_b = NULL;
             long num_a = strtol(name_a, &end_a, 10);
             long num_b = strtol(name_b, &end_b, 10);
 
             // allow ordering e.g. 007 > 7
             size_t len_a = end_a - name_a;
             size_t len_b = end_b - name_b;
 
             if (num_a < num_b) {
                 return -1;
             } else if (num_a > num_b) {
                 return 1;
             } else if (len_a < len_b) {
                 return -1;
             } else if (len_a > len_b) {
                 return 1;
             }
             name_a = end_a;
             name_b = end_b;
         } else {
             // Compare non-digits case-insensitively
             int lower_a = tolower(*name_a);
             int lower_b = tolower(*name_b);
 
             if (lower_a < lower_b) {
                 return -1;
             } else if (lower_a > lower_b) {
                 return 1;
             }
             ++name_a;
             ++name_b;
         }
     }
     if (!*name_a && *name_b) {
         return -1;
     } else if (*name_a && !*name_b) {
         return 1;
     }
     return 0;
 }
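
 /* Illustrative usage sketch (not part of this patch): because digit runs
  * compare numerically, "node2" sorts before "node10", and "node7" sorts
  * before "node007" when the numeric values are equal.
  */
 #if 0
 static GList *
 example_sort_nodes(GList *nodes)
 {
     return g_list_sort(nodes, sort_node_uname);
 }
 #endif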
 
 /*!
  * \internal
  * \brief Output node weights to stdout
  *
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 static void
 pe__output_node_weights(pe_resource_t *rsc, const char *comment,
                         GHashTable *nodes)
 {
     char score[128]; // Stack-allocated since this is called frequently
 
     // Sort the nodes so the output is consistent for regression tests
     GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname);
 
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         score2char_stack(node->weight, score, sizeof(score));
         if (rsc) {
             printf("%s: %s allocation score on %s: %s\n",
                    comment, rsc->id, node->details->uname, score);
         } else {
             printf("%s: %s = %s\n", comment, node->details->uname, score);
         }
     }
     g_list_free(list);
 }
 
 /*!
  * \internal
  * \brief Log node weights at trace level
  *
  * \param[in] file      Caller's filename
  * \param[in] function  Caller's function name
  * \param[in] line      Caller's line number
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 static void
 pe__log_node_weights(const char *file, const char *function, int line,
                      pe_resource_t *rsc, const char *comment, GHashTable *nodes)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     char score[128]; // Stack-allocated since this is called frequently
 
     // Don't waste time if we're not tracing at this point
     pcmk__log_else(LOG_TRACE, return);
 
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         score2char_stack(node->weight, score, sizeof(score));
         if (rsc) {
             qb_log_from_external_source(function, file,
                                         "%s: %s allocation score on %s: %s",
                                         LOG_TRACE, line, 0,
                                         comment, rsc->id,
                                         node->details->uname, score);
         } else {
             qb_log_from_external_source(function, file, "%s: %s = %s",
                                         LOG_TRACE, line, 0,
                                         comment, node->details->uname,
                                         score);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Log or output node weights
  *
  * \param[in] file      Caller's filename
  * \param[in] function  Caller's function name
  * \param[in] line      Caller's line number
  * \param[in] to_log    Log if true, otherwise output
  * \param[in] rsc       Use allowed nodes for this resource
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     If rsc is not specified, use these nodes
  */
 void
 pe__show_node_weights_as(const char *file, const char *function, int line,
                          bool to_log, pe_resource_t *rsc, const char *comment,
                          GHashTable *nodes)
 {
     if (rsc != NULL) {
         if (is_set(rsc->flags, pe_rsc_orphan)) {
             // Don't show allocation scores for orphans
             return;
         }
         nodes = rsc->allowed_nodes;
     }
     if (nodes == NULL) {
         // Nothing to show
         return;
     }
 
     if (to_log) {
         pe__log_node_weights(file, function, line, rsc, comment, nodes);
     } else {
         pe__output_node_weights(rsc, comment, nodes);
     }
 
     // If this resource has children, repeat recursively for each
     if (rsc && rsc->children) {
         for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             pe__show_node_weights_as(file, function, line, to_log, child,
                                      comment, nodes);
         }
     }
 }
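
 /* Callers normally reach this through a wrapper macro in the internal
  * headers that supplies the file/function/line arguments. A direct call
  * would look like this (illustrative, not part of this patch):
  */
 #if 0
 static void
 example_show_weights(pe_resource_t *rsc)
 {
     pe__show_node_weights_as(__FILE__, __func__, __LINE__, true, rsc,
                              "example", NULL);
 }
 #endif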
 
 static void
 append_dump_text(gpointer key, gpointer value, gpointer user_data)
 {
     char **dump_text = user_data;
     char *new_text = crm_strdup_printf("%s %s=%s",
                                        *dump_text, (char *)key, (char *)value);
 
     free(*dump_text);
     *dump_text = new_text;
 }
 
 void
 dump_node_capacity(int level, const char *comment, pe_node_t * node)
 {
     char *dump_text = crm_strdup_printf("%s: %s capacity:",
                                         comment, node->details->uname);
 
     g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
 
     if (level == LOG_STDOUT) {
         fprintf(stdout, "%s\n", dump_text);
     } else {
         crm_trace("%s", dump_text);
     }
 
     free(dump_text);
 }
 
 void
 dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node)
 {
     char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
                                         comment, rsc->id, node->details->uname);
 
     g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
     switch (level) {
         case LOG_STDOUT:
             fprintf(stdout, "%s\n", dump_text);
             break;
         case LOG_NEVER:
             break;
         default:
             crm_trace("%s", dump_text);
     }
     free(dump_text);
 }
 
 gint
 sort_rsc_index(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *resource1 = (const pe_resource_t *)a;
     const pe_resource_t *resource2 = (const pe_resource_t *)b;
 
     if (a == NULL && b == NULL) {
         return 0;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (resource1->sort_index > resource2->sort_index) {
         return -1;
     }
 
     if (resource1->sort_index < resource2->sort_index) {
         return 1;
     }
 
     return 0;
 }
 
 gint
 sort_rsc_priority(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *resource1 = (const pe_resource_t *)a;
     const pe_resource_t *resource2 = (const pe_resource_t *)b;
 
     if (a == NULL && b == NULL) {
         return 0;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (resource1->priority > resource2->priority) {
         return -1;
     }
 
     if (resource1->priority < resource2->priority) {
         return 1;
     }
 
     return 0;
 }
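
/* Illustrative sketch (not from this file): both comparators follow the
 * GCompareFunc contract, so they can be passed straight to GLib's list
 * sorting, e.g. for a GList of pe_resource_t pointers:
 *
 *     rsc_list = g_list_sort(rsc_list, sort_rsc_priority);
 *
 * Note the inverted return values (-1 when the first argument ranks
 * higher): they deliberately sort higher-priority resources toward the
 * front of the list, and NULL entries toward the back.
 */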
 
 pe_action_t *
 custom_action(pe_resource_t * rsc, char *key, const char *task,
               pe_node_t * on_node, gboolean optional, gboolean save_action,
               pe_working_set_t * data_set)
 {
     pe_action_t *action = NULL;
     GListPtr possible_matches = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
     CRM_CHECK(task != NULL, free(key); return NULL);
 
     if (save_action && rsc != NULL) {
         possible_matches = find_actions(rsc->actions, key, on_node);
     } else if(save_action) {
 #if 0
         action = g_hash_table_lookup(data_set->singletons, key);
 #else
         /* More expensive but takes 'node' into account */
         possible_matches = find_actions(data_set->actions, key, on_node);
 #endif
     }
 
     if(data_set->singletons == NULL) {
         data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
     }
 
     if (possible_matches != NULL) {
         if (pcmk__list_of_multiple(possible_matches)) {
             pe_warn("Action %s for %s on %s exists %d times",
                     task, rsc ? rsc->id : "<NULL>",
                     on_node ? on_node->details->uname : "<NULL>", g_list_length(possible_matches));
         }
 
         action = g_list_nth_data(possible_matches, 0);
         pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s",
                      action->id, action->uuid,
                      (rsc? rsc->id : "no resource"), task,
                      (on_node? on_node->details->uname : "no node"));
         g_list_free(possible_matches);
     }
 
     if (action == NULL) {
         if (save_action) {
             pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s",
                          (optional? "optional" : "mandatory"),
                          data_set->action_id, key,
                          (rsc? rsc->id : "no resource"), task,
                          (on_node? on_node->details->uname : "no node"));
         }
 
         action = calloc(1, sizeof(pe_action_t));
         if (save_action) {
             action->id = data_set->action_id++;
         } else {
             action->id = 0;
         }
         action->rsc = rsc;
         CRM_ASSERT(task != NULL);
         action->task = strdup(task);
         if (on_node) {
             action->node = pe__copy_node(on_node);
         }
         action->uuid = strdup(key);
 
         if (safe_str_eq(task, CRM_OP_LRM_DELETE)) {
             // Resource history deletion for a node can be done on the DC
             pe_set_action_bit(action, pe_action_dc);
         }
 
         pe_set_action_bit(action, pe_action_runnable);
         if (optional) {
             pe_set_action_bit(action, pe_action_optional);
         } else {
             pe_clear_action_bit(action, pe_action_optional);
         }
 
         action->extra = crm_str_table_new();
         action->meta = crm_str_table_new();
 
         if (save_action) {
             data_set->actions = g_list_prepend(data_set->actions, action);
             if(rsc == NULL) {
                 g_hash_table_insert(data_set->singletons, action->uuid, action);
             }
         }
 
         if (rsc != NULL) {
             action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
 
             unpack_operation(action, action->op_entry, rsc->container, data_set);
 
             if (save_action) {
                 rsc->actions = g_list_prepend(rsc->actions, action);
             }
         }
 
         if (save_action) {
             pe_rsc_trace(rsc, "Action %d created", action->id);
         }
     }
 
     if (!optional && is_set(action->flags, pe_action_optional)) {
         pe_rsc_trace(rsc, "Unset optional on action %d", action->id);
         pe_clear_action_bit(action, pe_action_optional);
     }
 
     if (rsc != NULL) {
         enum action_tasks a_task = text2task(action->task);
         int warn_level = LOG_TRACE;
 
         if (save_action) {
             warn_level = LOG_WARNING;
         }
 
         if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
             && action->node != NULL && action->op_entry != NULL) {
             pe_set_action_bit(action, pe_action_have_node_attrs);
             pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
                                        action->node->details->attrs,
                                        action->extra, NULL, FALSE, data_set);
         }
 
         if (is_set(action->flags, pe_action_pseudo)) {
             /* leave untouched */
 
         } else if (action->node == NULL) {
             pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
             pe_clear_action_bit(action, pe_action_runnable);
 
         } else if (is_not_set(rsc->flags, pe_rsc_managed)
                    && g_hash_table_lookup(action->meta,
                                           XML_LRM_ATTR_INTERVAL_MS) == NULL) {
             crm_debug("Action %s (unmanaged)", action->uuid);
             pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
             pe_set_action_bit(action, pe_action_optional);
 /*   			action->runnable = FALSE; */
 
         } else if (is_not_set(action->flags, pe_action_dc)
                    && !(action->node->details->online)
                    && (!pe__is_guest_node(action->node)
                        || action->node->details->remote_requires_reset)) {
             pe_clear_action_bit(action, pe_action_runnable);
             do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
                        action->uuid, action->node->details->uname);
             if (is_set(action->rsc->flags, pe_rsc_managed)
                 && save_action && a_task == stop_rsc
                 && action->node->details->unclean == FALSE) {
                 pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE);
             }
 
         } else if (is_not_set(action->flags, pe_action_dc)
                    && action->node->details->pending) {
             pe_clear_action_bit(action, pe_action_runnable);
             do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
                        action->uuid, action->node->details->uname);
 
         } else if (action->needs == rsc_req_nothing) {
             pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
             pe_action_set_reason(action, NULL, TRUE);
             if (pe__is_guest_node(action->node)
                 && !pe_can_fence(data_set, action->node)) {
                 /* An action that requires nothing usually does not require any
                  * fencing in order to be runnable. However, there is an
                  * exception: an action cannot be completed if it is on a guest
                  * node whose host is unclean and cannot be fenced.
                  */
                 pe_clear_action_bit(action, pe_action_runnable);
                 crm_debug("%s\t%s (cancelled : host cannot be fenced)",
                           action->node->details->uname, action->uuid);
             } else {
                 pe_set_action_bit(action, pe_action_runnable);
             }
 #if 0
             /*
              * No point checking this
              * - if we don't have quorum we can't stonith anyway
              */
         } else if (action->needs == rsc_req_stonith) {
             crm_trace("Action %s requires only stonith", action->uuid);
             action->runnable = TRUE;
 #endif
         } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                    && data_set->no_quorum_policy == no_quorum_stop) {
             pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE);
             crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);
 
         } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
                    && data_set->no_quorum_policy == no_quorum_freeze) {
             pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
             if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
                 pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE);
                 pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
                              action->node->details->uname, action->uuid);
             }
 
         } else if(is_not_set(action->flags, pe_action_runnable)) {
             pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
             //pe_action_set_reason(action, NULL, TRUE);
             pe_set_action_bit(action, pe_action_runnable);
         }
 
         if (save_action) {
             switch (a_task) {
                 case stop_rsc:
                     set_bit(rsc->flags, pe_rsc_stopping);
                     break;
                 case start_rsc:
                     clear_bit(rsc->flags, pe_rsc_starting);
                     if (is_set(action->flags, pe_action_runnable)) {
                         set_bit(rsc->flags, pe_rsc_starting);
                     }
                     break;
                 default:
                     break;
             }
         }
     }
 
     free(key);
     return action;
 }
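
/* Illustrative usage sketch (an assumption, not from this file): a caller
 * typically builds the key with pcmk__op_key() and lets custom_action()
 * find or create the action. Note that custom_action() takes ownership of
 * key and eventually frees it:
 *
 *     char *key = pcmk__op_key(rsc->id, RSC_STOP, 0);
 *     pe_action_t *stop = custom_action(rsc, key, RSC_STOP, node,
 *                                       FALSE, TRUE, data_set);
 *
 * With save_action=TRUE, the action is registered in data_set->actions
 * (and rsc->actions), so a later call with the same key and node returns
 * the existing action instead of creating a duplicate.
 */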
 
 static const char *
 unpack_operation_on_fail(pe_action_t * action)
 {
 
     const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
 
     if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) {
         pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
                          "action to default value because 'standby' is not "
                          "allowed for stop", action->rsc->id);
         return NULL;
     } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
         /* demote on_fail defaults to master monitor value if present */
         xmlNode *operation = NULL;
         const char *name = NULL;
         const char *role = NULL;
         const char *on_fail = NULL;
         const char *interval_spec = NULL;
         const char *enabled = NULL;
 
         CRM_CHECK(action->rsc != NULL, return NULL);
 
         for (operation = __xml_first_child_element(action->rsc->ops_xml);
              operation && !value; operation = __xml_next_element(operation)) {
 
             if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
                 continue;
             }
             name = crm_element_value(operation, "name");
             role = crm_element_value(operation, "role");
             on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
             enabled = crm_element_value(operation, "enabled");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             if (!on_fail) {
                 continue;
             } else if (enabled && !crm_is_true(enabled)) {
                 continue;
             } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
                 continue;
             } else if (crm_parse_interval_spec(interval_spec) == 0) {
                 continue;
             }
 
             value = on_fail;
         }
     } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) {
         value = "ignore";
     }
 
     return value;
 }
 
 static xmlNode *
 find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
 {
     guint interval_ms = 0;
     guint min_interval_ms = G_MAXUINT;
     const char *name = NULL;
     const char *value = NULL;
     const char *interval_spec = NULL;
     xmlNode *op = NULL;
     xmlNode *operation = NULL;
 
     for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
          operation = __xml_next_element(operation)) {
 
         if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
             name = crm_element_value(operation, "name");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             value = crm_element_value(operation, "enabled");
             if (!include_disabled && value && crm_is_true(value) == FALSE) {
                 continue;
             }
 
             if (safe_str_neq(name, RSC_STATUS)) {
                 continue;
             }
 
             interval_ms = crm_parse_interval_spec(interval_spec);
 
             if (interval_ms && (interval_ms < min_interval_ms)) {
                 min_interval_ms = interval_ms;
                 op = operation;
             }
         }
     }
 
     return op;
 }
 
 static int
 unpack_start_delay(const char *value, GHashTable *meta)
 {
     int start_delay = 0;
 
     if (value != NULL) {
         start_delay = crm_get_msec(value);
 
         if (start_delay < 0) {
             start_delay = 0;
         }
 
         if (meta) {
             g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay));
         }
     }
 
     return start_delay;
 }
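
/* Illustrative example: unpack_start_delay("10s", meta) returns 10000 and
 * (when meta is non-NULL) stores the normalized string "10000" under
 * XML_OP_ATTR_START_DELAY; values that parse as negative are clamped to 0.
 */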
 
 // true if value contains valid, non-NULL interval origin for recurring op
 static bool
 unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms,
                        crm_time_t *now, long long *start_delay)
 {
     long long result = 0;
     guint interval_sec = interval_ms / 1000;
     crm_time_t *origin = NULL;
 
     // Ignore unspecified values and non-recurring operations
     if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
         return false;
     }
 
     // Parse interval origin from text
     origin = crm_time_new(value);
     if (origin == NULL) {
         pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
                          "'%s' because '%s' is not valid",
                          (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
         return false;
     }
 
     // Get seconds since origin (negative if origin is in the future)
     result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
     crm_time_free(origin);
 
     // Calculate seconds from closest interval to now
     result = result % interval_sec;
 
     // Calculate seconds remaining until next interval
     result = ((result <= 0)? 0 : interval_sec) - result;
     crm_info("Calculated a start delay of %llds for operation '%s'",
              result,
              (ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
 
     if (start_delay != NULL) {
         *start_delay = result * 1000; // milliseconds
     }
     return true;
 }
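
/* Worked example (illustrative): with an interval-origin of 14:00:00, a
 * 24-hour interval (interval_sec = 86400), and "now" at 14:30:00 on the
 * same day:
 *
 *     result = now - origin;           // 1800s elapsed since the origin
 *     result = 1800 % 86400;           // 1800s into the current interval
 *     result = 86400 - 1800;           // 84600s until the next boundary
 *
 * so *start_delay is set to 84600000 (milliseconds).
 */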
 
 static int
 unpack_timeout(const char *value)
 {
     int timeout = crm_get_msec(value);
 
     if (timeout < 0) {
         timeout = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
     return timeout;
 }
 
 int
 pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
 {
     xmlNode *child = NULL;
     const char *timeout = NULL;
     int timeout_ms = 0;
 
     for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
          child != NULL; child = crm_next_same_xml(child)) {
         if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) {
             timeout = crm_element_value(child, XML_ATTR_TIMEOUT);
             break;
         }
     }
 
     if (timeout == NULL && data_set->op_defaults) {
         GHashTable *action_meta = crm_str_table_new();
         pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
                                    NULL, action_meta, NULL, FALSE, data_set);
         timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
     }
 
     // @TODO check meta-attributes (including versioned meta-attributes)
     // @TODO maybe use min-interval monitor timeout as default for monitors
 
     timeout_ms = crm_get_msec(timeout);
     if (timeout_ms < 0) {
         timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
     return timeout_ms;
 }
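
/* Illustrative sketch: given operation XML along the lines of
 *
 *     <op id="db-monitor-10s" name="monitor" interval="10s" timeout="30s"/>
 *
 * pe_get_configured_timeout(rsc, "monitor", data_set) returns 30000. When
 * no matching <op> sets a timeout, op_defaults is consulted, and failing
 * that the value falls back to CRM_DEFAULT_OP_TIMEOUT_S.
 */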
 
 #if ENABLE_VERSIONED_ATTRS
 static void
 unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
                       guint interval_ms, crm_time_t *now)
 {
     xmlNode *attrs = NULL;
     xmlNode *attr = NULL;
 
     for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL;
          attrs = __xml_next_element(attrs)) {
 
         for (attr = __xml_first_child_element(attrs); attr != NULL;
              attr = __xml_next_element(attr)) {
 
             const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
             const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
 
             if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) {
                 int start_delay = unpack_start_delay(value, NULL);
 
                 crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
             } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) {
                 long long start_delay = 0;
 
                 if (unpack_interval_origin(value, xml_obj, interval_ms, now,
                                            &start_delay)) {
                     crm_xml_add(attr, XML_NVPAIR_ATTR_NAME,
                                 XML_OP_ATTR_START_DELAY);
                     crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
                 }
             } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) {
                 int timeout = unpack_timeout(value);
 
                 crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout);
             }
         }
     }
 }
 #endif
 
 /*!
  * \brief Unpack operation XML into an action structure
  *
  * Unpack an operation's meta-attributes (normalizing the interval, timeout,
  * and start delay values as integer milliseconds), requirements, and
  * failure policy.
  *
  * \param[in,out] action     Action to unpack into
  * \param[in]     xml_obj    Operation XML (or NULL if all defaults)
  * \param[in]     container  Resource that contains affected resource, if any
  * \param[in]     data_set   Cluster state
  */
 void
 unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
                  pe_working_set_t * data_set)
 {
     guint interval_ms = 0;
     int timeout = 0;
     char *value_ms = NULL;
     const char *value = NULL;
     const char *field = NULL;
     char *default_timeout = NULL;
 #if ENABLE_VERSIONED_ATTRS
     pe_rsc_action_details_t *rsc_details = NULL;
 #endif
 
     CRM_CHECK(action && action->rsc, return);
 
     // Cluster-wide <op_defaults> <meta_attributes>
     pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL,
                                action->meta, NULL, FALSE, data_set);
 
     // Probe timeouts default differently, so handle timeout default later
     default_timeout = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
     if (default_timeout) {
         default_timeout = strdup(default_timeout);
         g_hash_table_remove(action->meta, XML_ATTR_TIMEOUT);
     }
 
     if (xml_obj) {
         xmlAttrPtr xIter = NULL;
 
         // <op> <meta_attributes> take precedence over defaults
         pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL,
                                    action->meta, NULL, TRUE, data_set);
 
 #if ENABLE_VERSIONED_ATTRS
         rsc_details = pe_rsc_action_details(action);
         pe_unpack_versioned_attributes(data_set->input, xml_obj,
                                        XML_TAG_ATTR_SETS, NULL,
                                        rsc_details->versioned_parameters,
                                        data_set->now, NULL);
         pe_unpack_versioned_attributes(data_set->input, xml_obj,
                                        XML_TAG_META_SETS, NULL,
                                        rsc_details->versioned_meta,
                                        data_set->now, NULL);
 #endif
 
         /* Anything set as an <op> XML property has highest precedence.
          * This ensures we use the name and interval from the <op> tag.
          */
         for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
             const char *prop_name = (const char *)xIter->name;
             const char *prop_value = crm_element_value(xml_obj, prop_name);
 
             g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
         }
     }
 
     g_hash_table_remove(action->meta, "id");
 
     // Normalize interval to milliseconds
     field = XML_LRM_ATTR_INTERVAL;
     value = g_hash_table_lookup(action->meta, field);
     if (value != NULL) {
         interval_ms = crm_parse_interval_spec(value);
 
     } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) {
         /* An orphaned recurring monitor will not have any XML. However, we
          * want the interval to be set, so the action can be properly detected
          * as a recurring monitor. Parse it from the key in this case.
          */
         parse_op_key(action->uuid, NULL, NULL, &interval_ms);
     }
     if (interval_ms > 0) {
         value_ms = crm_strdup_printf("%u", interval_ms);
         g_hash_table_replace(action->meta, strdup(field), value_ms);
 
     } else if (value) {
         g_hash_table_remove(action->meta, field);
     }
 
     // Handle timeout default, now that we know the interval
     if (g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT)) {
         free(default_timeout);
 
     } else {
         // Probe timeouts default to minimum-interval monitor's
         if (safe_str_eq(action->task, RSC_STATUS) && (interval_ms == 0)) {
 
             xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
 
             if (min_interval_mon) {
                 value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
                 if (value) {
                     crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'",
                               action->uuid, value);
                     free(default_timeout);
                     default_timeout = strdup(value);
                 }
             }
         }
 
         if (default_timeout) {
             g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT),
                                 default_timeout);
         }
     }
 
     if (safe_str_neq(action->task, RSC_START)
         && safe_str_neq(action->task, RSC_PROMOTE)) {
         action->needs = rsc_req_nothing;
         value = "nothing (not start/promote)";
 
     } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
         action->needs = rsc_req_stonith;
         value = "fencing (resource)";
 
     } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
         action->needs = rsc_req_quorum;
         value = "quorum (resource)";
 
     } else {
         action->needs = rsc_req_nothing;
         value = "nothing (resource)";
     }
 
     pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value);
 
     value = unpack_operation_on_fail(action);
 
     if (value == NULL) {
 
     } else if (safe_str_eq(value, "block")) {
         action->on_fail = action_fail_block;
         g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
         value = "block"; // The above could destroy the original string
 
     } else if (safe_str_eq(value, "fence")) {
         action->on_fail = action_fail_fence;
         value = "node fencing";
 
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
                              "operation '%s' to 'stop' because 'fence' is not "
                              "valid when fencing is disabled", action->uuid);
             action->on_fail = action_fail_stop;
             action->fail_role = RSC_ROLE_STOPPED;
             value = "stop resource";
         }
 
     } else if (safe_str_eq(value, "standby")) {
         action->on_fail = action_fail_standby;
         value = "node standby";
 
     } else if (safe_str_eq(value, "ignore")
                || safe_str_eq(value, "nothing")) {
         action->on_fail = action_fail_ignore;
         value = "ignore";
 
     } else if (safe_str_eq(value, "migrate")) {
         action->on_fail = action_fail_migrate;
         value = "force migration";
 
     } else if (safe_str_eq(value, "stop")) {
         action->on_fail = action_fail_stop;
         action->fail_role = RSC_ROLE_STOPPED;
         value = "stop resource";
 
     } else if (safe_str_eq(value, "restart")) {
         action->on_fail = action_fail_recover;
         value = "restart (and possibly migrate)";
 
     } else if (safe_str_eq(value, "restart-container")) {
         if (container) {
             action->on_fail = action_fail_restart_container;
             value = "restart container (and possibly migrate)";
 
         } else {
             value = NULL;
         }
 
     } else {
         pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
         value = NULL;
     }
 
     /* defaults */
     if (value == NULL && container) {
         action->on_fail = action_fail_restart_container;
         value = "restart container (and possibly migrate) (default)";
 
     /* For remote nodes, ensure that any failure that results in dropping an
      * active connection to the node results in fencing of the node.
      *
      * There are only two action failures that don't result in fencing.
      * 1. probes - probe failures are expected.
      * 2. start - a start failure indicates that an active connection does not already
      * exist. The user can set op on-fail=fence if they really want to fence start
      * failures. */
     } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) &&
                 (pe__resource_is_remote_conn(action->rsc, data_set) &&
                !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && (interval_ms == 0)) &&
                 (safe_str_neq(action->task, CRMD_ACTION_START)))) {
 
         if (!is_set(action->rsc->flags, pe_rsc_managed)) {
             action->on_fail = action_fail_stop;
             action->fail_role = RSC_ROLE_STOPPED;
             value = "stop unmanaged remote node (enforcing default)";
 
         } else {
             if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 value = "fence remote node (default)";
             } else {
                 value = "recover remote node connection (default)";
             }
 
             if (action->rsc->remote_reconnect_ms) {
                 action->fail_role = RSC_ROLE_STOPPED;
             }
             action->on_fail = action_fail_reset_remote;
         }
 
     } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
         if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
             action->on_fail = action_fail_fence;
             value = "resource fence (default)";
 
         } else {
             action->on_fail = action_fail_block;
             value = "resource block (default)";
         }
 
     } else if (value == NULL) {
         action->on_fail = action_fail_recover;
         value = "restart (and possibly migrate) (default)";
     }
 
     pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);
 
     value = NULL;
     if (xml_obj != NULL) {
         value = g_hash_table_lookup(action->meta, "role_after_failure");
         if (value) {
            pe_warn_once(pe_wo_role_after,
                         "Support for role_after_failure is deprecated and "
                         "will be removed in a future release");
         }
     }
     if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
         action->fail_role = text2role(value);
     }
     /* defaults */
     if (action->fail_role == RSC_ROLE_UNKNOWN) {
         if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) {
             action->fail_role = RSC_ROLE_SLAVE;
         } else {
             action->fail_role = RSC_ROLE_STARTED;
         }
     }
     pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
                  role2text(action->fail_role));
 
     value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
     if (value) {
         unpack_start_delay(value, action->meta);
     } else {
         long long start_delay = 0;
 
         value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
         if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
                                    &start_delay)) {
             g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
                                  crm_strdup_printf("%lld", start_delay));
         }
     }
 
     value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
     timeout = unpack_timeout(value);
     g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout));
 
 #if ENABLE_VERSIONED_ATTRS
     unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms,
                           data_set->now);
 #endif
 }
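
/* Precedence illustration (a sketch, not from this file): given
 *
 *     <op_defaults>
 *       <meta_attributes id="op-defaults">
 *         <nvpair id="op-timeout" name="timeout" value="20s"/>
 *       </meta_attributes>
 *     </op_defaults>
 *     ...
 *     <op id="db-start" name="start" interval="0" timeout="60s"/>
 *
 * the start action ends up with timeout=60000: cluster-wide op_defaults
 * are unpacked first, <op>-level meta_attributes override them, and
 * properties set directly on the <op> tag (like the timeout above) take
 * the highest precedence.
 */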
 
 static xmlNode *
 find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled)
 {
     guint interval_ms = 0;
     gboolean do_retry = TRUE;
     char *local_key = NULL;
     const char *name = NULL;
     const char *value = NULL;
     const char *interval_spec = NULL;
     char *match_key = NULL;
     xmlNode *op = NULL;
     xmlNode *operation = NULL;
 
   retry:
     for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
          operation = __xml_next_element(operation)) {
         if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
             name = crm_element_value(operation, "name");
             interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             value = crm_element_value(operation, "enabled");
             if (!include_disabled && value && crm_is_true(value) == FALSE) {
                 continue;
             }
 
             interval_ms = crm_parse_interval_spec(interval_spec);
             match_key = pcmk__op_key(rsc->id, name, interval_ms);
             if (safe_str_eq(key, match_key)) {
                 op = operation;
             }
             free(match_key);
 
             if (rsc->clone_name) {
                 match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
                 if (safe_str_eq(key, match_key)) {
                     op = operation;
                 }
                 free(match_key);
             }
 
             if (op != NULL) {
                 free(local_key);
                 return op;
             }
         }
     }
 
     free(local_key);
     if (do_retry == FALSE) {
         return NULL;
     }
 
     do_retry = FALSE;
     if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
         local_key = pcmk__op_key(rsc->id, "migrate", 0);
         key = local_key;
         goto retry;
 
     } else if (strstr(key, "_notify_")) {
         local_key = pcmk__op_key(rsc->id, "notify", 0);
         key = local_key;
         goto retry;
     }
 
     return NULL;
 }
 
 xmlNode *
 find_rsc_op_entry(pe_resource_t * rsc, const char *key)
 {
     return find_rsc_op_entry_helper(rsc, key, FALSE);
 }
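
/* Illustrative sketch: look up the <op> definition backing a 10-second
 * monitor:
 *
 *     char *key = pcmk__op_key(rsc->id, "monitor", 10000);
 *     xmlNode *op = find_rsc_op_entry(rsc, key);
 *     free(key);
 *
 * The helper also matches against the resource's clone name, and retries
 * with generic "migrate"/"notify" keys for migration and notification
 * actions.
 */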
 
 void
 print_node(const char *pre_text, pe_node_t * node, gboolean details)
 {
     if (node == NULL) {
         crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
         return;
     }
 
     CRM_ASSERT(node->details);
     crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
               pre_text == NULL ? "" : pre_text,
               pre_text == NULL ? "" : ": ",
               node->details->online ? "" : "Unavailable/Unclean ",
               node->details->uname, node->weight, node->fixed ? "True" : "False");
 
     if (details) {
         int log_level = LOG_TRACE;
 
         char *pe_mutable = strdup("\t\t");
         GListPtr gIter = node->details->running_rsc;
 
         crm_trace("\t\t===Node Attributes");
         g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
         free(pe_mutable);
 
         crm_trace("\t\t=== Resources");
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
             rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending,
                             &log_level);
         }
     }
 }
 
/*
 * GHFunc callback used with g_hash_table_foreach()
 */
 void
 print_str_str(gpointer key, gpointer value, gpointer user_data)
 {
     crm_trace("%s%s %s ==> %s",
               user_data == NULL ? "" : (char *)user_data,
               user_data == NULL ? "" : ": ", (char *)key, (char *)value);
 }
 
 void
 pe_free_action(pe_action_t * action)
 {
     if (action == NULL) {
         return;
     }
     g_list_free_full(action->actions_before, free);     /* pe_action_wrapper_t* */
     g_list_free_full(action->actions_after, free);      /* pe_action_wrapper_t* */
     if (action->extra) {
         g_hash_table_destroy(action->extra);
     }
     if (action->meta) {
         g_hash_table_destroy(action->meta);
     }
 #if ENABLE_VERSIONED_ATTRS
     if (action->rsc) {
         pe_free_rsc_action_details(action);
     }
 #endif
     free(action->cancel_task);
     free(action->reason);
     free(action->task);
     free(action->uuid);
     free(action->node);
     free(action);
 }
 
 GListPtr
 find_recurring_actions(GListPtr input, pe_node_t * not_on_node)
 {
     const char *value = NULL;
     GListPtr result = NULL;
     GListPtr gIter = input;
 
     CRM_CHECK(input != NULL, return NULL);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS);
         if (value == NULL) {
             /* skip */
         } else if (safe_str_eq(value, "0")) {
             /* skip */
         } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) {
             /* skip */
         } else if (not_on_node == NULL) {
             crm_trace("(null) Found: %s", action->uuid);
             result = g_list_prepend(result, action);
 
         } else if (action->node == NULL) {
             /* skip */
         } else if (action->node->details != not_on_node->details) {
             crm_trace("Found: %s", action->uuid);
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 enum action_tasks
 get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic)
 {
     enum action_tasks task = text2task(name);
 
     if (rsc == NULL) {
         return task;
 
     } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
         switch (task) {
             case stopped_rsc:
             case started_rsc:
             case action_demoted:
             case action_promoted:
                 crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
                 return task - 1;
                 break;
             default:
                 break;
         }
     }
     return task;
 }
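
/* The "task - 1" fold above relies on enum action_tasks placing each
 * completion pseudo-action immediately after its atomic counterpart
 * (started_rsc right after start_rsc, and so on). Illustratively,
 * assuming text2task("running") maps to started_rsc:
 *
 *     get_complex_task(native_rsc, "running", FALSE) == start_rsc
 */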
 
 pe_action_t *
 find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node)
 {
     GListPtr gIter = NULL;
 
     CRM_CHECK(uuid || task, return NULL);
 
     for (gIter = input; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (uuid != NULL && safe_str_neq(uuid, action->uuid)) {
             continue;
 
         } else if (task != NULL && safe_str_neq(task, action->task)) {
             continue;
 
         } else if (on_node == NULL) {
             return action;
 
         } else if (action->node == NULL) {
             continue;
 
         } else if (on_node->details == action->node->details) {
             return action;
         }
     }
 
     return NULL;
 }
 
 GListPtr
 find_actions(GListPtr input, const char *key, const pe_node_t *on_node)
 {
     GListPtr gIter = input;
     GListPtr result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (safe_str_neq(key, action->uuid)) {
             crm_trace("%s does not match action %s", key, action->uuid);
             continue;
 
         } else if (on_node == NULL) {
             crm_trace("Action %s matches (ignoring node)", key);
             result = g_list_prepend(result, action);
 
         } else if (action->node == NULL) {
             crm_trace("Action %s matches (unallocated, assigning to %s)",
                       key, on_node->details->uname);
 
             action->node = pe__copy_node(on_node);
             result = g_list_prepend(result, action);
 
         } else if (on_node->details == action->node->details) {
             crm_trace("Action %s on %s matches", key, on_node->details->uname);
             result = g_list_prepend(result, action);
 
         } else {
             crm_trace("Action %s on node %s does not match requested node %s",
                       key, action->node->details->uname,
                       on_node->details->uname);
         }
     }
 
     return result;
 }
 
 GList *
 find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
 {
     GList *result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     if (on_node == NULL) {
         crm_trace("Not searching for action %s because node not specified",
                   key);
         return NULL;
     }
 
     for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (action->node == NULL) {
             crm_trace("Skipping comparison of %s vs action %s without node",
                       key, action->uuid);
 
         } else if (safe_str_neq(key, action->uuid)) {
             crm_trace("Desired action %s doesn't match %s", key, action->uuid);
 
         } else if (safe_str_neq(on_node->details->id,
                                 action->node->details->id)) {
             crm_trace("Action %s desired node ID %s doesn't match %s",
                       key, on_node->details->id, action->node->details->id);
 
         } else {
             crm_trace("Action %s matches", key);
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 /*!
  * \brief Find all actions of given type for a resource
  *
  * \param[in] rsc           Resource to search
  * \param[in] node          Find only actions scheduled on this node
  * \param[in] task          Action name to search for
  * \param[in] require_node  If TRUE, NULL node or action node will not match
  *
  * \return List of actions found (or NULL if none)
  * \note If node is not NULL and require_node is FALSE, matching actions
  *       without a node will be assigned to node.
  */
 GList *
 pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                      const char *task, bool require_node)
 {
     GList *result = NULL;
     char *key = pcmk__op_key(rsc->id, task, 0);
 
     if (require_node) {
         result = find_actions_exact(rsc->actions, key, node);
     } else {
         result = find_actions(rsc->actions, key, node);
     }
     free(key);
     return result;
 }
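
/* Illustrative sketch: collect every stop action scheduled for rsc on a
 * particular node (the caller frees the list but not its elements):
 *
 *     GList *stops = pe__resource_actions(rsc, node, RSC_STOP, TRUE);
 *     ...
 *     g_list_free(stops);
 *
 * With require_node=TRUE the lookup uses find_actions_exact(), so actions
 * without an assigned node never match.
 */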
 
 static void
 resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
 {
     pe_node_t *match = NULL;
 
     if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
         && safe_str_eq(tag, "symmetric_default")) {
        /* This string comparison may be fragile, but exclusive resources and
          * exclusive nodes should not have the symmetric_default constraint
          * applied to them.
          */
         return;
 
     } else if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             resource_node_score(child_rsc, node, score, tag);
         }
     }
 
     pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
     match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match == NULL) {
         match = pe__copy_node(node);
         g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
     }
     match->weight = pe__add_scores(match->weight, score);
 }
 
 void
 resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
                   pe_working_set_t * data_set)
 {
     if (node != NULL) {
         resource_node_score(rsc, node, score, tag);
 
     } else if (data_set != NULL) {
         GListPtr gIter = data_set->nodes;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node_iter = (pe_node_t *) gIter->data;
 
             resource_node_score(rsc, node_iter, score, tag);
         }
 
     } else {
         GHashTableIter iter;
         pe_node_t *node_iter = NULL;
 
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
             resource_node_score(rsc, node_iter, score, tag);
         }
     }
 
     if (node == NULL && score == -INFINITY) {
         if (rsc->allocated_to) {
             crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
             free(rsc->allocated_to);
             rsc->allocated_to = NULL;
         }
     }
 }
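
/* Illustrative sketch: prefer one node, or ban a resource everywhere.
 * Passing node=NULL applies the score to every known node, and a score
 * of -INFINITY in that case also clears any existing allocation (the tag
 * strings below are just examples):
 *
 *     resource_location(rsc, node1, INFINITY, "prefer-node1", data_set);
 *     resource_location(rsc, NULL, -INFINITY, "ban-everywhere", data_set);
 */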
 
 #define sort_return(an_int, why) do {					\
 	free(a_uuid);						\
 	free(b_uuid);						\
 	crm_trace("%s (%d) %c %s (%d) : %s",				\
 		  a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',	\
 		  b_xml_id, b_call_id, why);				\
 	return an_int;							\
     } while(0)
 
 gint
 sort_op_by_callid(gconstpointer a, gconstpointer b)
 {
     int a_call_id = -1;
     int b_call_id = -1;
 
     char *a_uuid = NULL;
     char *b_uuid = NULL;
 
     const xmlNode *xml_a = a;
     const xmlNode *xml_b = b;
 
     const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
     const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
 
     if (safe_str_eq(a_xml_id, b_xml_id)) {
         /* We have duplicate lrm_rsc_op entries in the status
          * section which is unlikely to be a good thing
          *    - we can handle it easily enough, but we need to get
          *    to the bottom of why it's happening.
          */
         pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
         sort_return(0, "duplicate");
     }
 
     crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
     crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
 
     if (a_call_id == -1 && b_call_id == -1) {
         /* both are pending ops so it doesn't matter since
          *   stops are never pending
          */
         sort_return(0, "pending");
 
     } else if (a_call_id >= 0 && a_call_id < b_call_id) {
         sort_return(-1, "call id");
 
     } else if (b_call_id >= 0 && a_call_id > b_call_id) {
         sort_return(1, "call id");
 
     } else if (b_call_id >= 0 && a_call_id == b_call_id) {
         /*
          * The op and last_failed_op are the same
          * Order on last-rc-change
          */
         time_t last_a = -1;
         time_t last_b = -1;
 
         crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
         crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
 
         crm_trace("rc-change: %lld vs %lld",
                   (long long) last_a, (long long) last_b);
         if (last_a >= 0 && last_a < last_b) {
             sort_return(-1, "rc-change");
 
         } else if (last_b >= 0 && last_a > last_b) {
             sort_return(1, "rc-change");
         }
         sort_return(0, "rc-change");
 
     } else {
         /* One of the inputs is a pending operation
          * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
          */
 
         int a_id = -1;
         int b_id = -1;
 
         const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
         const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
 
         CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
         if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic a");
         }
         if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic b");
         }
         /* try to determine the relative age of the operation...
          * some pending operations (e.g. a start) may have been superseded
          *   by a subsequent stop
          *
          * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
          */
         if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) {
             /*
              * some of the logic in here may be redundant...
              *
              * if the UUID from the TE doesn't match then one better
              *   be a pending operation.
              * pending operations don't survive between elections and joins
              *   because we query the LRM directly
              */
 
             if (b_call_id == -1) {
                 sort_return(-1, "transition + call");
 
             } else if (a_call_id == -1) {
                 sort_return(1, "transition + call");
             }
 
         } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
             sort_return(-1, "transition");
 
         } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
             sort_return(1, "transition");
         }
     }
 
     /* we should never end up here */
     CRM_CHECK(FALSE, sort_return(0, "default"));
 
 }
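
/* Illustrative sketch: order a resource's lrm_rsc_op history entries
 * oldest-first before replaying them (each list element is an xmlNode *):
 *
 *     op_list = g_list_sort(op_list, sort_op_by_callid);
 */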
 
 time_t
 get_effective_time(pe_working_set_t * data_set)
 {
     if(data_set) {
         if (data_set->now == NULL) {
             crm_trace("Recording a new 'now'");
             data_set->now = crm_time_new(NULL);
         }
         return crm_time_get_seconds_since_epoch(data_set->now);
     }
 
     crm_trace("Defaulting to 'now'");
     return time(NULL);
 }
 
 gboolean
 get_target_role(pe_resource_t * rsc, enum rsc_role_e * role)
 {
     enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
     const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
 
     CRM_CHECK(role != NULL, return FALSE);
 
     if (value == NULL || safe_str_eq("started", value)
         || safe_str_eq("default", value)) {
         return FALSE;
     }
 
     local_role = text2role(value);
     if (local_role == RSC_ROLE_UNKNOWN) {
         pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
                          "because '%s' is not valid", rsc->id, value);
         return FALSE;
 
     } else if (local_role > RSC_ROLE_STARTED) {
         if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
             if (local_role > RSC_ROLE_SLAVE) {
                 /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
                 return FALSE;
             }
 
         } else {
             pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
                              "because '%s' only makes sense for promotable "
                              "clones", rsc->id, value);
             return FALSE;
         }
     }
 
     *role = local_role;
     return TRUE;
 }
 
 gboolean
 order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
 {
     GListPtr gIter = NULL;
     pe_action_wrapper_t *wrapper = NULL;
     GListPtr list = NULL;
 
     if (order == pe_order_none) {
         return FALSE;
     }
 
     if (lh_action == NULL || rh_action == NULL) {
         return FALSE;
     }
 
     crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
 
     /* Ensure we never create a dependency on ourselves... it's happened */
     CRM_ASSERT(lh_action != rh_action);
 
     /* Filter dups, otherwise update_action_states() has too much work to do */
     gIter = lh_action->actions_after;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
 
         if (after->action == rh_action && (after->type & order)) {
             return FALSE;
         }
     }
 
     wrapper = calloc(1, sizeof(pe_action_wrapper_t));
     wrapper->action = rh_action;
     wrapper->type = order;
 
     list = lh_action->actions_after;
     list = g_list_prepend(list, wrapper);
     lh_action->actions_after = list;
 
     wrapper = NULL;
 
 /* 	order |= pe_order_implies_then; */
 /* 	order ^= pe_order_implies_then; */
 
     wrapper = calloc(1, sizeof(pe_action_wrapper_t));
     wrapper->action = lh_action;
     wrapper->type = order;
     list = rh_action->actions_before;
     list = g_list_prepend(list, wrapper);
     rh_action->actions_before = list;
     return TRUE;
 }
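
/* Illustrative sketch: require a resource's stop to complete before its
 * start (the classic restart ordering):
 *
 *     order_actions(stop, start, pe_order_implies_then);
 *
 * Duplicate orderings of the same type are filtered out above, so calling
 * this repeatedly for the same pair is safe.
 */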
 
 pe_action_t *
 get_pseudo_op(const char *name, pe_working_set_t * data_set)
 {
     pe_action_t *op = NULL;
 
     if(data_set->singletons) {
         op = g_hash_table_lookup(data_set->singletons, name);
     }
     if (op == NULL) {
         op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
         set_bit(op->flags, pe_action_pseudo);
         set_bit(op->flags, pe_action_runnable);
     }
 
     return op;
 }
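
/* Illustrative sketch: pseudo-ops serve as cluster-wide synchronization
 * points; the name acts as a singleton key (the name below is just an
 * example):
 *
 *     pe_action_t *barrier = get_pseudo_op("all_stopped", data_set);
 *
 * Repeated calls with the same name return the same action object.
 */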
 
 void
 destroy_ticket(gpointer data)
 {
     pe_ticket_t *ticket = data;
 
     if (ticket->state) {
         g_hash_table_destroy(ticket->state);
     }
     free(ticket->id);
     free(ticket);
 }
 
 pe_ticket_t *
 ticket_new(const char *ticket_id, pe_working_set_t * data_set)
 {
     pe_ticket_t *ticket = NULL;
 
     if (ticket_id == NULL || strlen(ticket_id) == 0) {
         return NULL;
     }
 
     if (data_set->tickets == NULL) {
         data_set->tickets =
             g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                   destroy_ticket);
     }
 
     ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
     if (ticket == NULL) {
 
         ticket = calloc(1, sizeof(pe_ticket_t));
         if (ticket == NULL) {
             crm_err("Cannot allocate ticket '%s'", ticket_id);
             return NULL;
         }
 
         crm_trace("Creaing ticket entry for %s", ticket_id);
 
         ticket->id = strdup(ticket_id);
         ticket->granted = FALSE;
         ticket->last_granted = -1;
         ticket->standby = FALSE;
         ticket->state = crm_str_table_new();
 
         g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
     }
 
     return ticket;
 }
 
 static void
 filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
 {
     if (param_set && param_string) {
         xmlAttrPtr xIter = param_set->properties;
 
         while (xIter) {
             const char *prop_name = (const char *)xIter->name;
             char *name = crm_strdup_printf(" %s ", prop_name);
             char *match = strstr(param_string, name);
 
             free(name);
 
            // Advance the iterator now, because the current entry might be removed below
             xIter = xIter->next;
 
             if (need_present && match == NULL) {
                 crm_trace("%s not found in %s", prop_name, param_string);
                 xml_remove_prop(param_set, prop_name);
 
             } else if (need_present == FALSE && match) {
                 crm_trace("%s found in %s", prop_name, param_string);
                 xml_remove_prop(param_set, prop_name);
             }
         }
     }
 }
 
 #if ENABLE_VERSIONED_ATTRS
 static void
 append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
 {
     GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
     char *key = NULL;
     char *value = NULL;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
         crm_xml_add(params, key, value);
     }
     g_hash_table_destroy(hash);
 }
 #endif
 
 /*!
  * \internal
  * \brief Calculate action digests and store in node's digest cache
  *
  * \param[in] rsc          Resource that action was for
  * \param[in] task         Name of action performed
  * \param[in] key          Action's task key
  * \param[in] node         Node action was performed on
  * \param[in] xml_op       XML of operation in CIB status (if available)
  * \param[in] calc_secure  Whether to calculate secure digest
  * \param[in] data_set     Cluster working set
  *
  * \return Pointer to node's digest cache entry
  */
 static op_digest_cache_t *
 rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key,
                   pe_node_t *node, xmlNode *xml_op, bool calc_secure,
                   pe_working_set_t *data_set)
 {
     op_digest_cache_t *data = NULL;
 
     data = g_hash_table_lookup(node->details->digest_cache, key);
     if (data == NULL) {
         GHashTable *local_rsc_params = crm_str_table_new();
         pe_action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set);
 #if ENABLE_VERSIONED_ATTRS
         xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
         const char *ra_version = NULL;
 #endif
 
         const char *op_version;
         const char *restart_list = NULL;
         const char *secure_list = " passwd password ";
 
         data = calloc(1, sizeof(op_digest_cache_t));
         CRM_ASSERT(data != NULL);
 
         get_rsc_attributes(local_rsc_params, rsc, node, data_set);
 #if ENABLE_VERSIONED_ATTRS
         pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
 #endif
 
         data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
 
         // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
         if (pe__add_bundle_remote_name(rsc, data->params_all,
                                        XML_RSC_ATTR_REMOTE_RA_ADDR)) {
             crm_trace("Set address for bundle connection %s (on %s)",
                       rsc->id, node->details->uname);
         }
 
         g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
         g_hash_table_foreach(action->extra, hash2field, data->params_all);
         g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
         g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
 
         if(xml_op) {
             secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
             restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
 
             op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
 #if ENABLE_VERSIONED_ATTRS
             ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
 #endif
 
         } else {
             op_version = CRM_FEATURE_SET;
         }
 
 #if ENABLE_VERSIONED_ATTRS
         append_versioned_params(local_versioned_params, ra_version, data->params_all);
         append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all);
 
         {
             pe_rsc_action_details_t *details = pe_rsc_action_details(action);
             append_versioned_params(details->versioned_parameters, ra_version, data->params_all);
         }
 #endif
 
         pcmk__filter_op_for_digest(data->params_all);
 
         g_hash_table_destroy(local_rsc_params);
         pe_free_action(action);
 
         data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
 
         if (calc_secure) {
             data->params_secure = copy_xml(data->params_all);
             if(secure_list) {
                 filter_parameters(data->params_secure, secure_list, FALSE);
             }
             data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
         }
 
         if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
             data->params_restart = copy_xml(data->params_all);
             if (restart_list) {
                 filter_parameters(data->params_restart, restart_list, TRUE);
             }
             data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
         }
 
         g_hash_table_insert(node->details->digest_cache, strdup(key), data);
     }
 
     return data;
 }
 
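 /*!
  * \internal
  * \brief Calculate digests for an operation and compare against recorded ones
  *
  * \param[in] rsc       Resource the operation history entry is for
  * \param[in] xml_op    Operation history entry from CIB status
  * \param[in] node      Node the operation was performed on
  * \param[in] data_set  Cluster working set
  *
  * \return Node's digest cache entry, with rc set to the comparison result
  */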
 op_digest_cache_t *
 rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
                       pe_working_set_t * data_set)
 {
     op_digest_cache_t *data = NULL;
 
     char *key = NULL;
     guint interval_ms = 0;
 
     const char *op_version;
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *digest_all;
     const char *digest_restart;
 
     CRM_ASSERT(node != NULL);
 
     op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
     digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
     digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
     key = pcmk__op_key(rsc->id, task, interval_ms);
     data = rsc_action_digest(rsc, task, key, node, xml_op,
                              is_set(data_set->flags, pe_flag_sanitized),
                              data_set);
 
     data->rc = RSC_DIGEST_MATCH;
     if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
         pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
                  key, node->details->uname,
                  crm_str(digest_restart), data->digest_restart_calc,
                  op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         data->rc = RSC_DIGEST_RESTART;
 
     } else if (digest_all == NULL) {
         /* it is unknown what the previous op digest was */
         data->rc = RSC_DIGEST_UNKNOWN;
 
     } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
         pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s",
                  key, node->details->uname,
                  crm_str(digest_all), data->digest_all_calc,
                  (interval_ms > 0)? "reschedule" : "reload",
                  op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         data->rc = RSC_DIGEST_ALL;
     }
 
     free(key);
     return data;
 }
 
 /*!
  * \internal
  * \brief Create an unfencing summary for use in special node attribute
  *
  * Create a string combining a fence device's resource ID, agent type, and
  * parameter digest (whether for all parameters or just non-private parameters).
  * This can be stored in a special node attribute, allowing us to detect changes
  * in either the agent type or parameters, to know whether unfencing must be
  * redone or can be safely skipped when the device's history is cleaned.
  *
  * \param[in] rsc_id        Fence device resource ID
  * \param[in] agent_type    Fence device agent
  * \param[in] param_digest  Fence device parameter digest
  *
  * \return Newly allocated string with unfencing digest
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 create_unfencing_summary(const char *rsc_id, const char *agent_type,
                          const char *param_digest)
 {
     return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
 }
 
 /*!
  * \internal
  * \brief Check whether a node can skip unfencing
  *
  * Check whether a fence device's current definition matches a node's
  * stored summary of when it was last unfenced by the device.
  *
  * \param[in] rsc_id        Fence device's resource ID
  * \param[in] agent         Fence device's agent type
  * \param[in] digest_calc   Fence device's current parameter digest
  * \param[in] node_summary  Value of node's special unfencing node attribute
  *                          (a comma-separated list of unfencing summaries for
  *                          all devices that have unfenced this node)
  *
  * \return TRUE if digest matches, FALSE otherwise
  */
 static bool
 unfencing_digest_matches(const char *rsc_id, const char *agent,
                          const char *digest_calc, const char *node_summary)
 {
     bool matches = FALSE;
 
     if (rsc_id && agent && digest_calc && node_summary) {
         char *search_secure = create_unfencing_summary(rsc_id, agent,
                                                        digest_calc);
 
         /* The digest was calculated including the device ID and agent,
          * so there is no risk of collision using strstr().
          */
         matches = (strstr(node_summary, search_secure) != NULL);
         crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
                   search_secure, matches? "" : "not ", node_summary);
         free(search_secure);
     }
     return matches;
 }
 
 /* Magic string to use as action name for digest cache entries used for
  * unfencing checks. This is not a real action name (the real action is "on"),
  * so check_action_definition() won't confuse these entries with real actions.
  */
 #define STONITH_DIGEST_TASK "stonith-on"
 
 /*!
  * \internal
  * \brief Calculate fence device digests and digest comparison result
  *
  * \param[in] rsc       Fence device resource
  * \param[in] agent     Fence device's agent type
  * \param[in] node      Node with digest cache to use
  * \param[in] data_set  Cluster working set
  *
  * \return Node's digest cache entry
  */
 static op_digest_cache_t *
 fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent,
                           pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *node_summary = NULL;
 
     // Calculate device's current parameter digests
     char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0);
     op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key,
                                                 node, NULL, TRUE, data_set);
 
     free(key);
 
     // Check whether node has special unfencing summary node attribute
     node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
     if (node_summary == NULL) {
         data->rc = RSC_DIGEST_UNKNOWN;
         return data;
     }
 
     // Check whether full parameter digest matches
     if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
                                  node_summary)) {
         data->rc = RSC_DIGEST_MATCH;
         return data;
     }
 
     // Check whether secure parameter digest matches
     node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
     if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
                                  node_summary)) {
         data->rc = RSC_DIGEST_MATCH;
         if (is_set(data_set->flags, pe_flag_stdout)) {
             printf("Only 'private' parameters to %s for unfencing %s changed\n",
                    rsc->id, node->details->uname);
         }
         return data;
     }
 
     // Parameters don't match
     data->rc = RSC_DIGEST_ALL;
     if (is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout))
         && data->digest_secure_calc) {
         char *digest = create_unfencing_summary(rsc->id, agent,
                                                 data->digest_secure_calc);
 
         printf("Parameters to %s for unfencing %s changed, try '%s'\n",
                rsc->id, node->details->uname, digest);
         free(digest);
     }
     return data;
 }
 
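 /*!
  * \internal
  * \brief Get a resource's ID as it should be displayed in output
  *
  * \param[in] rsc  Resource to check
  *
  * \return XML ID for anonymous clone instances (omitting the instance
  *         number), otherwise the resource's ID
  */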
 const char *rsc_printable_id(pe_resource_t *rsc)
 {
     if (is_not_set(rsc->flags, pe_rsc_unique)) {
         return ID(rsc->xml);
     }
     return rsc->id;
 }
 
 void
 clear_bit_recursive(pe_resource_t * rsc, unsigned long long flag)
 {
     GListPtr gIter = rsc->children;
 
     clear_bit(rsc->flags, flag);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         clear_bit_recursive(child_rsc, flag);
     }
 }
 
 void
 set_bit_recursive(pe_resource_t * rsc, unsigned long long flag)
 {
     GListPtr gIter = rsc->children;
 
     set_bit(rsc->flags, flag);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         set_bit_recursive(child_rsc, flag);
     }
 }
 
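 /*!
  * \internal
  * \brief Find all fence devices that require or provide unfencing
  *
  * \param[in] candidates  Resources to search, recursing into children
  * \param[in] matches     Existing list to add results to
  *
  * \return Head of (possibly extended) list of matching fence devices
  */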
 static GListPtr
 find_unfencing_devices(GListPtr candidates, GListPtr matches) 
 {
     for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *candidate = gIter->data;
         const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES);
         const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
 
         if(candidate->children) {
             matches = find_unfencing_devices(candidate->children, matches);
         } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) {
             continue;
 
         } else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) {
             matches = g_list_prepend(matches, candidate);
         }
     }
     return matches;
 }
 
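 /*!
  * \internal
  * \brief Calculate the priority-based fencing delay for a node
  *
  * A non-zero delay is returned only when priority-fencing-delay is
  * configured, the target is an offline full cluster member with the highest
  * node priority (and priorities are not all equal), and no more than half of
  * the cluster members are in our partition. Otherwise 0 is returned, and any
  * configured pcmk_delay_base/max applies as usual.
  *
  * \param[in] node      Node being fenced
  * \param[in] data_set  Cluster working set
  *
  * \return Delay to use when fencing \p node
  */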
 static int
 node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set)
 {
     int member_count = 0;
     int online_count = 0;
     int top_priority = 0;
     int lowest_priority = 0;
     GListPtr gIter = NULL;
 
     // `priority-fencing-delay` is disabled
     if (data_set->priority_fencing_delay <= 0) {
         return 0;
     }
 
     /* No need to request a delay if the fencing target is not a normal cluster
      * member, for example if it's a remote node or a guest node. */
     if (node->details->type != node_member) {
         return 0;
     }
 
     // No need to request a delay if the fencing target is in our partition
     if (node->details->online) {
         return 0;
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *n = gIter->data;
 
         if (n->details->type != node_member) {
             continue;
         }
 
         member_count++;
 
         if (n->details->online) {
             online_count++;
         }
 
         if (member_count == 1
             || n->details->priority > top_priority) {
             top_priority = n->details->priority;
         }
 
         if (member_count == 1
             || n->details->priority < lowest_priority) {
             lowest_priority = n->details->priority;
         }
     }
 
     // No need to delay if more than half of the cluster members are in our partition
     if (online_count > member_count / 2) {
         return 0;
     }
 
     /* All nodes have equal priority, so any configured
      * `pcmk_delay_base/max` will apply as usual. */
     if (lowest_priority == top_priority) {
         return 0;
     }
 
     if (node->details->priority < top_priority) {
         return 0;
     }
 
     return data_set->priority_fencing_delay;
 }
 
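 /*!
  * \internal
  * \brief Create or retrieve the singleton fencing action for a node
  *
  * \param[in] node            Node to fence
  * \param[in] op              Fence action name ("on" for unfencing), or NULL
  *                            to use the cluster-wide default action
  * \param[in] optional        Whether the action should be optional
  * \param[in] reason          Text to record as the reason for fencing
  * \param[in] priority_delay  Whether priority-fencing-delay may apply here
  * \param[in] data_set        Cluster working set
  *
  * \return Fencing action for \p node
  */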
 pe_action_t *
 pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason,
             bool priority_delay, pe_working_set_t * data_set)
 {
     char *op_key = NULL;
     pe_action_t *stonith_op = NULL;
 
     if(op == NULL) {
         op = data_set->stonith_action;
     }
 
     op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
 
     if(data_set->singletons) {
         stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
     }
 
     if(stonith_op == NULL) {
         stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
 
         add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
         add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
         add_hash_param(stonith_op->meta, "stonith_action", op);
 
         if (pe__is_guest_or_remote_node(node)
             && is_set(data_set->flags, pe_flag_enable_unfencing)) {
             /* Extra work to detect device changes on remote nodes
              *
              * We may do this for all nodes in the future, but for now the
              * check_action_definition()-based approach works fine for
              * cluster nodes.
              */
             long max = 1024;
             long digests_all_offset = 0;
             long digests_secure_offset = 0;
 
             char *digests_all = calloc(max, sizeof(char));
             char *digests_secure = calloc(max, sizeof(char));
             GListPtr matches = find_unfencing_devices(data_set->resources, NULL);
 
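             // Append a "<id>:<agent>:<digest>," summary for each device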
             for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
                 pe_resource_t *match = gIter->data;
                 const char *agent = g_hash_table_lookup(match->meta,
                                                         XML_ATTR_TYPE);
                 op_digest_cache_t *data = NULL;
 
                 data = fencing_action_digest_cmp(match, agent, node, data_set);
                 if(data->rc == RSC_DIGEST_ALL) {
                     optional = FALSE;
                     crm_notice("Unfencing %s (remote) because the definition "
                                "of %s changed",
                                node->details->uname, match->id);
                     if (is_set(data_set->flags, pe_flag_stdout)) {
                         fprintf(stdout, "  notice: Unfencing %s (remote) "
                                 "because the definition of %s changed\n",
                                 node->details->uname, match->id);
                     }
                 }
 
                 digests_all_offset += snprintf(
                     digests_all+digests_all_offset, max-digests_all_offset,
                     "%s:%s:%s,", match->id, agent, data->digest_all_calc);
 
                 digests_secure_offset += snprintf(
                     digests_secure+digests_secure_offset, max-digests_secure_offset,
                     "%s:%s:%s,", match->id, agent, data->digest_secure_calc);
             }
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_OP_ATTR_DIGESTS_ALL),
                                 digests_all);
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_OP_ATTR_DIGESTS_SECURE),
                                 digests_secure);
         }
 
     } else {
         free(op_key);
     }
 
     if (data_set->priority_fencing_delay > 0
 
             /* This is a case where `priority-fencing-delay` applies, so at
              * least add the `priority-fencing-delay` field as an indicator. */
         && (priority_delay
 
             /* Re-calculate the priority delay when pe_fence_op() is called
              * again by stage6(), after node priorities have actually been
              * calculated via native_add_running() */
             || g_hash_table_lookup(stonith_op->meta,
                                    XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
 
             /* Add `priority-fencing-delay` to the fencing op even if it is 0
              * for the target node, so that it takes precedence over any
              * configured `pcmk_delay_base/max`.
              */
             char *delay_s = crm_itoa(node_priority_fencing_delay(node, data_set));
 
             g_hash_table_insert(stonith_op->meta,
                                 strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
                                 delay_s);
     }
 
     if(optional == FALSE && pe_can_fence(data_set, node)) {
         pe_action_required(stonith_op, NULL, reason);
     } else if(reason && stonith_op->reason == NULL) {
         stonith_op->reason = strdup(reason);
     }
 
     return stonith_op;
 }
 
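 /*!
  * \internal
  * \brief Schedule unfencing ("on" actions) where appropriate
  *
  * If a node is given, schedule unfencing of that node when it is online and
  * clean; otherwise, if a fence device is given, do the same for every
  * online, clean node the device is allowed to run on.
  *
  * \param[in] rsc         Fence device that requires unfencing, or NULL
  * \param[in] node        Node to unfence, or NULL
  * \param[in] reason      Text to record as the reason for unfencing
  * \param[in] dependency  If not NULL, action to order after the unfencing
  * \param[in] data_set    Cluster working set
  */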
 void
 trigger_unfencing(
     pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set) 
 {
     if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
         /* No resources require it */
         return;
 
     } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
         /* Wasn't a stonith device */
         return;
 
     } else if(node
               && node->details->online
               && node->details->unclean == FALSE
               && node->details->shutdown == FALSE) {
         pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
 
         if(dependency) {
             order_actions(unfence, dependency, pe_order_optional);
         }
 
     } else if(rsc) {
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
                 trigger_unfencing(rsc, node, reason, dependency, data_set);
             }
         }
     }
 }
 
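 /*!
  * \internal
  * \brief Add an object reference to a tag, creating the tag if needed
  *
  * \param[in] tags      Table of tags, indexed by tag name
  * \param[in] tag_name  Name of tag to add the reference to
  * \param[in] obj_ref   ID of object to reference
  *
  * \return TRUE on success (including an already existing reference),
  *         FALSE on bad arguments or memory allocation failure
  */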
 gboolean
 add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref)
 {
     pe_tag_t *tag = NULL;
     GListPtr gIter = NULL;
     gboolean is_existing = FALSE;
 
     CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
 
     tag = g_hash_table_lookup(tags, tag_name);
     if (tag == NULL) {
         tag = calloc(1, sizeof(pe_tag_t));
         if (tag == NULL) {
             return FALSE;
         }
         tag->id = strdup(tag_name);
         tag->refs = NULL;
         g_hash_table_insert(tags, strdup(tag_name), tag);
     }
 
     for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
         const char *existing_ref = (const char *) gIter->data;
 
         if (crm_str_eq(existing_ref, obj_ref, TRUE)){
             is_existing = TRUE;
             break;
         }
     }
 
     if (is_existing == FALSE) {
         tag->refs = g_list_append(tag->refs, strdup(obj_ref));
         crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
     }
 
     return TRUE;
 }
 
 void pe_action_set_flag_reason(const char *function, long line,
                                pe_action_t *action, pe_action_t *reason, const char *text,
                                enum pe_action_flags flags, bool overwrite)
 {
     bool unset = FALSE;
     bool update = FALSE;
     const char *change = NULL;
 
     if(is_set(flags, pe_action_runnable)) {
         unset = TRUE;
         change = "unrunnable";
     } else if(is_set(flags, pe_action_optional)) {
         unset = TRUE;
         change = "required";
     } else if(is_set(flags, pe_action_migrate_runnable)) {
         unset = TRUE;
         overwrite = TRUE;
         change = "unrunnable";
     } else if(is_set(flags, pe_action_dangle)) {
         change = "dangling";
     } else if(is_set(flags, pe_action_requires_any)) {
         change = "required";
     } else {
         crm_err("Unknown flag change to %x by %s: 0x%s",
                 flags, action->uuid, (reason? reason->uuid : "0"));
     }
 
     if(unset) {
         if(is_set(action->flags, flags)) {
             action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags);
             update = TRUE;
         }
 
     } else {
         if(is_not_set(action->flags, flags)) {
             action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags);
             update = TRUE;
         }
     }
 
     if((change && update) || text) {
         char *reason_text = NULL;
         if(reason == NULL) {
             pe_action_set_reason(action, text, overwrite);
 
         } else if(reason->rsc == NULL) {
             reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:"");
         } else {
             reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA");
         }
 
         if(reason_text && action->rsc != reason->rsc) {
             pe_action_set_reason(action, reason_text, overwrite);
         }
         free(reason_text);
     }
  }
 
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) 
 {
     if (action->reason != NULL && overwrite) {
         pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
                      action->uuid, action->reason, crm_str(reason));
         free(action->reason);
     } else if (action->reason == NULL) {
         pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
                      action->uuid, crm_str(reason));
     } else {
         // crm_assert(action->reason != NULL && !overwrite);
         return;
     }
 
     if (reason != NULL) {
         action->reason = strdup(reason);
     } else {
         action->reason = NULL;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether shutdown has been requested for a node
  *
  * \param[in] node  Node to check
  *
  * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
  * \note This differs from simply using node->details->shutdown in that it can
  *       be used before that has been determined (and in fact to determine it),
  *       and it can also be used to distinguish requested shutdown from implicit
  *       shutdown of remote nodes by virtue of their connection stopping.
  */
 bool
 pe__shutdown_requested(pe_node_t *node)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
 
     return shutdown && strcmp(shutdown, "0");
 }
 
 /*!
  * \internal
  * \brief Update a data set's "recheck by" time
  *
  * \param[in]     recheck   Epoch time when recheck should happen
  * \param[in,out] data_set  Current working set
  */
 void
 pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
 {
     if ((recheck > get_effective_time(data_set))
         && ((data_set->recheck_by == 0)
             || (data_set->recheck_by > recheck))) {
         data_set->recheck_by = recheck;
     }
 }
 
 /*!
  * \internal
  * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set
  */
 void
 pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
                            GHashTable *node_hash, GHashTable *hash,
                            const char *always_first, gboolean overwrite,
                            pe_working_set_t *data_set)
 {
     crm_time_t *next_change = crm_time_new_undefined();
 
     pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash,
                       always_first, overwrite, data_set->now, next_change);
     if (crm_time_is_defined(next_change)) {
         time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
         pe__update_recheck_time(recheck, data_set);
     }
     crm_time_free(next_change);
 }
 
 bool
 pe__resource_is_disabled(pe_resource_t *rsc)
 {
     const char *target_role = NULL;
 
     CRM_CHECK(rsc != NULL, return false);
     target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         if ((target_role_e == RSC_ROLE_STOPPED)
             || ((target_role_e == RSC_ROLE_SLAVE)
                 && is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Create an action to clear a resource's history from CIB
  *
  * \param[in] rsc   Resource to clear
  * \param[in] node  Node to clear history on
  *
  * \return New action to clear resource history
  */
 pe_action_t *
 pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                            pe_working_set_t *data_set)
 {
     char *key = NULL;
 
     CRM_ASSERT(rsc && node);
     key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
     return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
                          data_set);
 }
 
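 /*!
  * \internal
  * \brief Check whether a resource is running on any node in a list
  *
  * \param[in] rsc        Resource to check
  * \param[in] node_list  List of node names to match against
  *
  * \return true if \p rsc is running on any node matched by \p node_list,
  *         false otherwise
  */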
 bool
 pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list)
 {
-    /* If this resource is inactive, we will always return false unless
-     * node_list contains just '*'.  Inactive resources aren't running on
-     * any node.
-     */
-    gboolean is_active = rsc->fns->active(rsc, TRUE);
-    gboolean partially_active = rsc->fns->active(rsc, FALSE);
-
-    if (!is_active && !partially_active &&
-        node_list != NULL && strcmp(node_list->data, "*") == 0 && node_list->next == NULL) {
-        return true;
-    }
-
-    /* Otherwise, this resource must be running on one of the nodes in the
-     * given list.
-     */
     for (GListPtr ele = rsc->running_on; ele; ele = ele->next) {
         pe_node_t *node = (pe_node_t *) ele->data;
         if (pcmk__str_in_list(node_list, node->details->uname)) {
             return true;
         }
     }
 
     return false;
 }
+
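+/*!
+ * \internal
+ * \brief Check whether a resource should be filtered out of status output
+ *
+ * \param[in] rsc        Resource to check
+ * \param[in] only_show  List of node names to display
+ *
+ * \return true if \p rsc is at least partially active but running on none of
+ *         the nodes in \p only_show (and thus should be hidden), else false
+ */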
+bool
+pcmk__rsc_is_filtered(pe_resource_t *rsc, GListPtr only_show)
+{
+    return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any_node_in_list(rsc, only_show));
+}
diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c
index e2fd631f3f..92ddbd959e 100644
--- a/tools/crm_mon_print.c
+++ b/tools/crm_mon_print.c
@@ -1,1093 +1,1092 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <glib.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
 
 #include <config.h>
 #include <crm/cib/util.h>
 #include <crm/common/curses_internal.h>
 #include <crm/common/iso8601_internal.h>
 #include <crm/common/xml.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 #include <crm/pengine/pe_types.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/internal.h>
 #include <crm/common/util.h>
 #include <crm/fencing/internal.h>
 
 #include "crm_mon.h"
 
 static void print_resources_heading(pcmk__output_t *out, unsigned int mon_ops);
 static void print_resources_closing(pcmk__output_t *out, unsigned int mon_ops);
 static int print_resources(pcmk__output_t *out, pe_working_set_t *data_set,
                            unsigned int print_opts, unsigned int mon_ops, gboolean brief_output,
                            gboolean print_summary, GListPtr only_show, gboolean print_spacer);
 static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set,
                              pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops,
                              GListPtr op_list);
 static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                               pe_node_t *node, xmlNode *node_state, gboolean operations,
                               unsigned int mon_ops, GListPtr only_show);
 static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list,
                                const char *attrname, int *expected_score);
 static void print_node_attribute(gpointer name, gpointer user_data);
 static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                               gboolean operations, unsigned int mon_ops,
                               GListPtr only_show, gboolean print_spacer);
 static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                                  gboolean print_spacer);
 static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                                unsigned int mon_ops, const char *prefix,
                                gboolean print_spacer);
 static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                                  unsigned int mon_ops, GListPtr only_show,
                                  gboolean print_spacer);
 static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                                 GListPtr only_show, gboolean print_spacer);
 
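 /*!
  * \internal
  * \brief Build the list of node names to display output for
  *
  * \param[in] data_set  Cluster state
  * \param[in] s         Node name, tag name, or NULL/"*" for all nodes
  *
  * \return List of uname strings ("*" meaning all nodes), or NULL if \p s
  *         matched neither a node nor a tag
  */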
 static GListPtr
 build_uname_list(pe_working_set_t *data_set, const char *s)
 {
     GListPtr unames = NULL;
 
     if (s == NULL || strcmp(s, "*") == 0) {
         /* Nothing was given, or '*' was given, so return a list containing
          * just "*", which downstream code treats as matching all node names.
          * A literal '*' would otherwise fall into the pe__unames_with_tag()
          * branch and come back as an empty list, so catch it here.
          */
         unames = g_list_prepend(unames, strdup("*"));
     } else {
         pe_node_t *node = pe_find_node(data_set->nodes, s);
 
         if (node) {
             /* The given string was a valid uname for a node.  Return a
              * singleton list containing just that uname.
              */
             unames = g_list_prepend(unames, strdup(s));
         } else {
             /* The given string is not a valid uname, so it is either a tag
              * or a typo.  If it is a tag, return the unames of all nodes
              * with that tag.  Otherwise, pe__unames_with_tag() returns NULL
              * and nothing will be displayed.
              */
             unames = pe__unames_with_tag(data_set, s);
         }
     }
 
     return unames;
 }
 
 /*!
  * \internal
  * \brief Print resources section heading appropriate to options
  *
  * \param[in] out     The output functions structure.
  * \param[in] mon_ops Bitmask of mon_op_*.
  */
 static void
 print_resources_heading(pcmk__output_t *out, unsigned int mon_ops)
 {
     const char *heading;
 
     if (is_set(mon_ops, mon_op_group_by_node)) {
 
         /* Active resources have already been printed by node */
         heading = is_set(mon_ops, mon_op_inactive_resources) ? "Inactive Resources" : NULL;
 
     } else if (is_set(mon_ops, mon_op_inactive_resources)) {
         heading = "Full List of Resources";
 
     } else {
         heading = "Active Resources";
     }
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "%s", heading);
 }
 
 /*!
  * \internal
  * \brief Print whatever resource section closing is appropriate
  *
  * \param[in] out     The output functions structure.
  * \param[in] mon_ops Bitmask of mon_op_*.
  */
 static void
 print_resources_closing(pcmk__output_t *out, unsigned int mon_ops)
 {
     const char *heading;
 
     /* What type of resources we did or did not display */
     if (is_set(mon_ops, mon_op_group_by_node)) {
         heading = "inactive ";
     } else if (is_set(mon_ops, mon_op_inactive_resources)) {
         heading = "";
     } else {
         heading = "active ";
     }
 
     out->list_item(out, NULL, "No %sresources", heading);
 }
 
 /*!
  * \internal
  * \brief Print whatever resource section(s) are appropriate
  *
  * \param[in] out           The output functions structure.
  * \param[in] data_set      Cluster state to display.
  * \param[in] print_opts    Bitmask of pe_print_*.
  * \param[in] mon_ops       Bitmask of mon_op_*.
  * \param[in] brief_output  Whether to display full or brief output.
  * \param[in] print_summary Whether to display a failure summary.
  * \param[in] only_show     List of node names to display resources for.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_resources(pcmk__output_t *out, pe_working_set_t *data_set,
                 unsigned int print_opts, unsigned int mon_ops, gboolean brief_output,
                 gboolean print_summary, GListPtr only_show, gboolean print_spacer)
 {
     GListPtr rsc_iter;
     int rc = pcmk_rc_no_output;
 
     /* If we already showed active resources by node, and
      * we're not showing inactive resources, we have nothing to do
      */
     if (is_set(mon_ops, mon_op_group_by_node) && is_not_set(mon_ops, mon_op_inactive_resources)) {
         return rc;
     }
 
     /* Add a blank line between this section and the one before it. */
     if (print_spacer) {
         out->info(out, "%s", "");
     }
 
     print_resources_heading(out, mon_ops);
 
     /* If we haven't already printed resources grouped by node,
      * and brief output was requested, print resource summary */
     if (brief_output && is_not_set(mon_ops, mon_op_group_by_node)) {
         pe__rscs_brief_output(out, data_set->resources, print_opts,
                               is_set(mon_ops, mon_op_inactive_resources));
     }
 
     /* For each resource, display it if appropriate */
     for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
 
         /* Complex resources may have some sub-resources active and some inactive */
         gboolean is_active = rsc->fns->active(rsc, TRUE);
         gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
         /* Skip inactive orphans (deleted but still in CIB) */
         if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
             continue;
 
         /* Skip active resources if we already displayed them by node */
         } else if (is_set(mon_ops, mon_op_group_by_node)) {
             if (is_active) {
                 continue;
             }
 
         /* Skip primitives already counted in a brief summary */
         } else if (brief_output && (rsc->variant == pe_native)) {
             continue;
 
         /* Skip resources that aren't at least partially active,
          * unless we're displaying inactive resources
          */
         } else if (!partially_active && is_not_set(mon_ops, mon_op_inactive_resources)) {
             continue;
-        }
 
-        if (!pe__rsc_running_on_any_node_in_list(rsc, only_show)) {
+        } else if (partially_active && !pe__rsc_running_on_any_node_in_list(rsc, only_show)) {
             continue;
         }
 
         /* Print this resource */
         rc = pcmk_rc_ok;
         out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc, only_show);
     }
 
     if (print_summary && rc != pcmk_rc_ok) {
         print_resources_closing(out, mon_ops);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
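 // Get a resource's fail count on a node, or 0 if rsc is NULL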
 static int
 failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc,
               time_t *last_failure)
 {
     return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default,
                                   NULL, data_set)
                : 0;
 }
 
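 // Build a list of a resource's operation history entries, sorted by call ID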
 static GListPtr
 get_operation_list(xmlNode *rsc_entry)
 {
     GListPtr op_list = NULL;
     xmlNode *rsc_op = NULL;
 
     for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
          rsc_op = __xml_next_element(rsc_op)) {
         if (crm_str_eq((const char *) rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
             op_list = g_list_append(op_list, rsc_op);
         }
     }
 
     op_list = g_list_sort(op_list, sort_op_by_callid);
     return op_list;
 }
 
 /*!
  * \internal
  * \brief Print resource operation/failure history
  *
  * \param[in] out       The output functions structure.
  * \param[in] data_set  Cluster state to display.
  * \param[in] node      Node that ran this resource.
  * \param[in] rsc_entry Root of XML tree describing resource status.
  * \param[in] mon_ops   Bitmask of mon_op_*.
  * \param[in] op_list   A list of operations to print (freed here).
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node,
                   xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     /* Print each operation */
     for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *xml_op = (xmlNode *) gIter->data;
         const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(xml_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
         int op_rc_i = crm_parse_int(op_rc, "0");
 
         /* Display 0-interval monitors as "probe" */
         if (safe_str_eq(task, CRMD_ACTION_STATUS)
             && ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) {
             task = "probe";
         }
 
         /* Ignore notifies, and probes that found the resource not running (rc 7) */
         if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || (safe_str_eq(task, "probe") && (op_rc_i == 7))) {
             continue;
         }
 
         /* If this is the first printed operation, print heading for resource */
         if (rc == pcmk_rc_no_output) {
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE);
             rc = pcmk_rc_ok;
         }
 
         /* Print the operation */
         out->message(out, "op-history", xml_op, task, interval_ms_s,
                      op_rc_i, is_set(mon_ops, mon_op_print_timing));
     }
 
     /* Free the list we created (no need to free the individual items) */
     g_list_free(op_list);
 
     /* If we printed anything, close the resource */
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node operation/failure history
  *
  * \param[in] out        The output functions structure.
  * \param[in] data_set   Cluster state to display.
  * \param[in] node       Node whose history should be printed.
  * \param[in] node_state Root of XML tree describing node status.
  * \param[in] operations Whether to print operations or just failcounts.
  * \param[in] mon_ops    Bitmask of mon_op_*.
  * \param[in] only_show  List of node names to display.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                    pe_node_t *node, xmlNode *node_state, gboolean operations,
                    unsigned int mon_ops, GListPtr only_show)
 {
     xmlNode *lrm_rsc = NULL;
     xmlNode *rsc_entry = NULL;
     int rc = pcmk_rc_no_output;
 
     lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
     lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
 
     /* Print history of each of the node's resources */
     for (rsc_entry = __xml_first_child_element(lrm_rsc); rsc_entry != NULL;
          rsc_entry = __xml_next_element(rsc_entry)) {
 
         if (!crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
             continue;
         }
 
         if (operations == FALSE) {
             const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
             pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             if (failcount > 0) {
                 if (rc == pcmk_rc_no_output) {
                     rc = pcmk_rc_ok;
                     out->message(out, "node", node, get_resource_display_options(mon_ops),
                                  FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
                                  is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
                                  only_show);
                 }
 
                 out->message(out, "resource-history", rsc, rsc_id, FALSE,
                              failcount, last_failure, FALSE);
             }
         } else {
             GListPtr op_list = get_operation_list(rsc_entry);
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, get_resource_display_options(mon_ops),
                              FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
                              is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
                              only_show);
             }
 
             if (op_list != NULL) {
                 print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
             }
         }
     }
 
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Determine whether extended information about an attribute should be added.
  *
  * \param[in]  out            The output functions structure.
  * \param[in]  node           Node that ran this resource.
  * \param[in]  rsc_list       The list of resources for this node.
  * \param[in]  attrname       The attribute to find.
  * \param[out] expected_score The expected value for this attribute.
  *
  * \return TRUE if extended information should be printed, FALSE otherwise
  * \note Currently, extended information is only supported for ping/pingd
  *       resources, for which a message will be printed if connectivity is lost
  *       or degraded.
  */
 static gboolean
 add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list,
                const char *attrname, int *expected_score)
 {
     GListPtr gIter = NULL;
 
     for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         const char *type = g_hash_table_lookup(rsc->meta, "type");
         const char *name = NULL;
 
         if (rsc->children != NULL) {
             if (add_extra_info(out, node, rsc->children, attrname, expected_score)) {
                 return TRUE;
             }
         }
 
         if (safe_str_neq(type, "ping") && safe_str_neq(type, "pingd")) {
             // Only ping/pingd resources get extended info; check the rest
             continue;
         }
 
         name = g_hash_table_lookup(rsc->parameters, "name");
 
         if (name == NULL) {
             name = "pingd";
         }
 
         /* Identify the resource by matching its "name" parameter against the
          * attribute name. */
         if (safe_str_eq(name, attrname)) {
             int host_list_num = 0;
             /* int value = crm_parse_int(attrvalue, "0"); */
             const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
             const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
 
             if (hosts) {
                 char **host_list = g_strsplit(hosts, " ", 0);
                 host_list_num = g_strv_length(host_list);
                 g_strfreev(host_list);
             }
 
             /* If no multiplier is configured, default to 1 (the same as the
              * pingd agent's default). */
             *expected_score = host_list_num * crm_parse_int(multiplier, "1");
 
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /* structure for passing multiple user data to g_list_foreach() */
 struct mon_attr_data {
     pcmk__output_t *out;
     pe_node_t *node;
 };
 
 static void
 print_node_attribute(gpointer name, gpointer user_data)
 {
     const char *value = NULL;
     int expected_score = 0;
     gboolean add_extra = FALSE;
     struct mon_attr_data *data = (struct mon_attr_data *) user_data;
 
     value = pe_node_attribute_raw(data->node, name);
 
     add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc,
                                name, &expected_score);
 
     /* Print attribute name and value */
     data->out->message(data->out, "node-attribute", name, value, add_extra,
                        expected_score);
 }
 
 /*!
  * \internal
  * \brief Print history for all nodes.
  *
  * \param[in] out        The output functions structure.
  * \param[in] data_set   Cluster state to display.
  * \param[in] operations Whether to print operations or just failcounts.
  * \param[in] mon_ops    Bitmask of mon_op_*.
  * \param[in] only_show  List of node names to display.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                    gboolean operations, unsigned int mon_ops, GListPtr only_show,
                    gboolean print_spacer)
 {
     xmlNode *node_state = NULL;
     xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(cib_status) == 0) {
         return rc;
     }
 
     /* Print each node in the CIB status */
     for (node_state = __xml_first_child_element(cib_status); node_state != NULL;
          node_state = __xml_next_element(node_state)) {
         pe_node_t *node;
 
         if (!crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
             continue;
         }
 
         node = pe_find_node_id(data_set->nodes, ID(node_state));
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_show, node->details->uname)) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             /* Add a blank line between this section and the one before it. */
             if (print_spacer) {
                 out->info(out, "%s", "");
             }
 
             if (operations) {
                 out->begin_list(out, NULL, NULL, "Operations");
             } else {
                 out->begin_list(out, NULL, NULL, "Migration Summary");
             }
 
             rc = pcmk_rc_ok;
         }
 
         print_node_history(out, data_set, node, node_state, operations, mon_ops,
                            only_show);
     }
 
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print all tickets.
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                       gboolean print_spacer)
 {
     GHashTableIter iter;
     gpointer key, value;
 
     if (g_hash_table_size(data_set->tickets) == 0) {
         return pcmk_rc_no_output;
     }
 
     /* Add a blank line between this section and the one before it. */
     if (print_spacer) {
         out->info(out, "%s", "");
     }
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "Tickets");
 
     /* Print each ticket */
     g_hash_table_iter_init(&iter, data_set->tickets);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         pe_ticket_t *ticket = (pe_ticket_t *) value;
         out->message(out, "ticket", ticket);
     }
 
     /* Close section */
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Print section for negative location constraints
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  * \param[in] mon_ops  Bitmask of mon_op_*.
  * \param[in] prefix   ID prefix to filter results by.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                     unsigned int mon_ops, const char *prefix, gboolean print_spacer)
 {
     GListPtr gIter, gIter2;
     int rc = pcmk_rc_no_output;
 
     /* Print each ban */
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *location = gIter->data;
         if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
             continue;
         }
         for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_node_t *node = (pe_node_t *) gIter2->data;
 
             if (node->weight < 0) {
                 if (rc == pcmk_rc_no_output) {
                     /* Add a blank line between this section and the one before it. */
                     if (print_spacer) {
                         out->info(out, "%s", "");
                     }
 
                     rc = pcmk_rc_ok;
                     out->begin_list(out, NULL, NULL, "Negative Location Constraints");
                 }
 
                 out->message(out, "ban", node, location, is_set(mon_ops, mon_op_print_clone_detail));
             }
         }
     }
 
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node attributes section
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  * \param[in] mon_ops  Bitmask of mon_op_*.
  * \param[in] only_show  List of node names to display.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                       unsigned int mon_ops, GListPtr only_show,
                       gboolean print_spacer)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
 
     /* Unpack all resource parameters (it would be more efficient to do this
      * only when needed for the first time in add_extra_info())
      */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         crm_mon_get_parameters(gIter->data, data_set);
     }
 
     /* Display each node's attributes */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         struct mon_attr_data data;
 
         data.out = out;
         data.node = (pe_node_t *) gIter->data;
 
         if (data.node && data.node->details && data.node->details->online) {
             GList *attr_list = NULL;
             GHashTableIter iter;
             gpointer key, value;
 
             g_hash_table_iter_init(&iter, data.node->details->attrs);
             while (g_hash_table_iter_next(&iter, &key, &value)) {
                 attr_list = append_attr_list(attr_list, key);
             }
 
             if (attr_list == NULL) {
                 continue;
             }
 
             if (!pcmk__str_in_list(only_show, data.node->details->uname)) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 /* Add a blank line between this section and the one before it. */
                 if (print_spacer) {
                     out->info(out, "%s", "");
                 }
 
                 rc = pcmk_rc_ok;
                 out->begin_list(out, NULL, NULL, "Node Attributes");
             }
 
             out->message(out, "node", data.node, get_resource_display_options(mon_ops),
                          FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
                          is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
                          only_show);
             g_list_foreach(attr_list, print_node_attribute, &data);
             g_list_free(attr_list);
             out->end_list(out);
         }
     }
 
     /* Print section footer */
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print a section for failed actions
  *
  * \param[in] out      The output functions structure.
  * \param[in] data_set Cluster state to display.
  * \param[in] only_show  List of node names to display.
  * \param[in] print_spacer  Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                      GListPtr only_show, gboolean print_spacer)
 {
     xmlNode *xml_op = NULL;
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(data_set->failed) == 0) {
         return rc;
     }
 
     for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = __xml_next(xml_op)) {
         if (!pcmk__str_in_list(only_show, crm_element_value(xml_op, XML_ATTR_UNAME))) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             /* Add a blank line between this section and the one before it. */
             if (print_spacer) {
                 out->info(out, "%s", "");
             }
 
             rc = pcmk_rc_ok;
             out->begin_list(out, NULL, NULL, "Failed Resource Actions");
         }
 
         out->message(out, "failed-action", xml_op);
     }
 
     if (rc == pcmk_rc_ok) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for text/curses output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_show       Node name, tag, or "*" to filter output by.
  */
 void
 print_status(pcmk__output_t *out, pe_working_set_t *data_set,
              stonith_history_t *stonith_history, unsigned int mon_ops,
              unsigned int show, char *prefix, char *only_show)
 {
     GListPtr unames = NULL;
 
     unsigned int print_opts = get_resource_display_options(mon_ops);
     int rc = pcmk_rc_no_output;
     int x;
 
     x = out->message(out, "cluster-summary", data_set,
                      is_set(mon_ops, mon_op_print_clone_detail),
                      is_set(show, mon_show_stack),
                      is_set(show, mon_show_dc),
                      is_set(show, mon_show_times),
                      is_set(show, mon_show_counts),
                      is_set(show, mon_show_options));
 
     if (x == pcmk_rc_ok) {
         rc = pcmk_rc_ok;
     }
 
     unames = build_uname_list(data_set, only_show);
 
     if (is_set(show, mon_show_nodes) && unames) {
         if (rc == pcmk_rc_ok) {
             out->info(out, "%s", "");
         }
 
         x = out->message(out, "node-list", data_set->nodes, unames, print_opts,
                          is_set(mon_ops, mon_op_print_clone_detail),
                          is_set(mon_ops, mon_op_print_brief),
                          is_set(mon_ops, mon_op_group_by_node));
 
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* Print resources section, if needed */
     if (is_set(show, mon_show_resources)) {
         x = print_resources(out, data_set, print_opts, mon_ops,
                             is_set(mon_ops, mon_op_print_brief), TRUE,
                             unames, rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* print Node Attributes section if requested */
     if (is_set(show, mon_show_attributes)) {
         x = print_node_attributes(out, data_set, mon_ops, unames,
                                   rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
         x = print_node_summary(out, data_set, is_set(show, mon_show_operations),
                                mon_ops, unames, rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* If there were any failed actions, print them */
     if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
         x = print_failed_actions(out, data_set, unames, rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* Print failed stonith actions */
     if (is_set(show, mon_show_fence_failed) && is_set(mon_ops, mon_op_fence_history)) {
         for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
             if (hp->state == st_failed) {
                 x = out->message(out, "failed-fencing-history", hp, unames,
                                  is_set(mon_ops, mon_op_fence_full_history),
                                  rc == pcmk_rc_ok);
 
                 if (x == pcmk_rc_ok) {
                     rc = pcmk_rc_ok;
                 }
 
                 break;
             }
         }
     }
 
     /* Print tickets if requested */
     if (is_set(show, mon_show_tickets)) {
         x = print_cluster_tickets(out, data_set, rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* Print negative location constraints if requested */
     if (is_set(show, mon_show_bans)) {
         x = print_neg_locations(out, data_set, mon_ops, prefix, rc == pcmk_rc_ok);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     /* Print stonith history */
     if (is_set(mon_ops, mon_op_fence_history)) {
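         /* The "worked" and "pending" sections are mutually exclusive; as
          * above, the formatters scan the full list themselves, so each is
          * called at most once.
          */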
         if (is_set(show, mon_show_fence_worked)) {
             for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
                 if (hp->state != st_failed) {
                     x = out->message(out, "fencing-history", hp, unames,
                                      is_set(mon_ops, mon_op_fence_full_history),
                                      rc == pcmk_rc_ok);
 
                     if (x == pcmk_rc_ok) {
                         rc = pcmk_rc_ok;
                     }
 
                     break;
                 }
             }
         } else if (is_set(show, mon_show_fence_pending)) {
             for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
                 if (hp->state != st_failed && hp->state != st_done) {
                     x = out->message(out, "pending-fencing-actions", hp, unames,
                                      is_set(mon_ops, mon_op_fence_full_history),
                                      rc == pcmk_rc_ok);
 
                     if (x == pcmk_rc_ok) {
                         rc = pcmk_rc_ok;
                     }
 
                     break;
                 }
             }
         }
     }
 
     g_list_free_full(unames, free);
 }
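 
 /* A minimal sketch of how a caller might drive print_status() (hypothetical;
  * the real callers elsewhere in crm_mon set up "out", "data_set" and
  * "stonith_history" before getting here):
  *
  *     unsigned int show = mon_show_stack | mon_show_dc | mon_show_nodes
  *                         | mon_show_resources | mon_show_failures;
  *
  *     print_status(out, data_set, stonith_history, mon_op_fence_history,
  *                  show,
  *                  NULL,    // no ban prefix filter
  *                  NULL);   // no node name filter
  */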
 
 /*!
  * \internal
  * \brief Top-level printing function for XML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] history_rc      Result of fetching the stonith history.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_show       Node name(s) to filter results by.
  */
 void
 print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
                  crm_exit_t history_rc, stonith_history_t *stonith_history,
                  unsigned int mon_ops, unsigned int show, char *prefix,
                  char *only_show)
 {
     GListPtr unames = NULL;
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
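     /* Unlike the text output, XML needs no blank-line spacers between
      * sections, so per-section results are not tracked here (note the FALSE
      * passed below wherever the text output passes "rc == pcmk_rc_ok").
      */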
     out->message(out, "cluster-summary", data_set,
                  is_set(mon_ops, mon_op_print_clone_detail),
                  is_set(show, mon_show_stack),
                  is_set(show, mon_show_dc),
                  is_set(show, mon_show_times),
                  is_set(show, mon_show_counts),
                  is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_show);
 
     /* Print node list section, if requested */
     if (is_set(show, mon_show_nodes)) {
         out->message(out, "node-list", data_set->nodes, unames, print_opts,
                      is_set(mon_ops, mon_op_print_clone_detail),
                      is_set(mon_ops, mon_op_print_brief),
                      is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (is_set(show, mon_show_resources)) {
         print_resources(out, data_set, print_opts, mon_ops, FALSE, FALSE, unames, FALSE);
     }
 
     /* Print Node Attributes section, if requested */
     if (is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
         print_node_summary(out, data_set, is_set(show, mon_show_operations),
                            mon_ops, unames, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
         print_failed_actions(out, data_set, unames, FALSE);
     }
 
     /* Print stonith history.  Unlike the text output, the XML output emits
      * the complete history in a single section; history_rc conveys the
      * result of fetching that history to the formatter.
      */
     if (is_set(show, mon_show_fencing_all) && is_set(mon_ops, mon_op_fence_history)) {
         out->message(out, "full-fencing-history", history_rc, stonith_history,
                      unames, is_set(mon_ops, mon_op_fence_full_history), FALSE);
     }
 
     /* Print tickets if requested */
     if (is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, FALSE);
     }
 
     g_list_free_full(unames, free);
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for HTML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_show       Node name(s) to filter results by.
  *
  * \return 0 (this function currently cannot fail)
  */
 int
 print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
                   stonith_history_t *stonith_history, unsigned int mon_ops,
                   unsigned int show, char *prefix, char *only_show)
 {
     GListPtr unames = NULL;
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
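     /* As with the XML output, no per-section results are tracked here; FALSE
      * is passed wherever the text output passes "rc == pcmk_rc_ok".
      */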
     out->message(out, "cluster-summary", data_set,
                  is_set(mon_ops, mon_op_print_clone_detail),
                  is_set(show, mon_show_stack),
                  is_set(show, mon_show_dc),
                  is_set(show, mon_show_times),
                  is_set(show, mon_show_counts),
                  is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_show);
 
     /* Print node list section, if requested */
     if (is_set(show, mon_show_nodes) && unames) {
         out->message(out, "node-list", data_set->nodes, unames, print_opts,
                      is_set(mon_ops, mon_op_print_clone_detail),
                      is_set(mon_ops, mon_op_print_brief),
                      is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (is_set(show, mon_show_resources)) {
         print_resources(out, data_set, print_opts, mon_ops,
                         is_set(mon_ops, mon_op_print_brief), TRUE, unames, FALSE);
     }
 
     /* Print Node Attributes section, if requested */
     if (is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
         print_node_summary(out, data_set, is_set(show, mon_show_operations),
                            mon_ops, unames, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
         print_failed_actions(out, data_set, unames, FALSE);
     }
 
     /* Print failed stonith actions */
     if (is_set(show, mon_show_fence_failed) && is_set(mon_ops, mon_op_fence_history)) {
         for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
             if (hp->state == st_failed) {
                 out->message(out, "failed-fencing-history", hp, unames,
                              is_set(mon_ops, mon_op_fence_full_history), FALSE);
                 break;
             }
         }
     }
 
     /* Print stonith history */
     if (is_set(mon_ops, mon_op_fence_history)) {
         if (is_set(show, mon_show_fence_worked)) {
             for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
                 if (hp->state != st_failed) {
                     out->message(out, "fencing-history", hp, unames,
                                  is_set(mon_ops, mon_op_fence_full_history),
                                  FALSE);
                     break;
                 }
             }
         } else if (is_set(show, mon_show_fence_pending)) {
             for (stonith_history_t *hp = stonith_history; hp; hp = hp->next) {
                 if (hp->state != st_failed && hp->state != st_done) {
                     out->message(out, "pending-fencing-actions", hp, unames,
                                  is_set(mon_ops, mon_op_fence_full_history),
                                  FALSE);
                     break;
                 }
             }
         }
     }
 
     /* Print tickets if requested */
     if (is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, FALSE);
     }
 
     g_list_free_full(unames, free);
     return 0;
 }