diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 070a3205a3..a45037aeea 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3966 +1,3966 @@
 =#=#=#= Begin test: Basic text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output
 =#=#=#= Begin test: XML output =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output
 =#=#=#= Begin test: Basic text output without node section =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output without node section
 =#=#=#= Begin test: XML output without the node section =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --exclude=nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output without the node section
 =#=#=#= Begin test: Text output with only the node section =#=#=#=
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output with only the node section
 =#=#=#= Begin test: Complete text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output
 =#=#=#= Begin test: Complete text output with detail =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
     * ping	(ocf:pacemaker:ping):	 Started cluster01
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[2]
       * httpd-bundle-ip-192.168.122.133	(ocf:heartbeat:IPaddr2):	 Stopped
       * httpd	(ocf:heartbeat:apache):	 Stopped
       * httpd-bundle-docker-2	(ocf:heartbeat:docker):	 Stopped
       * httpd-bundle-2	(ocf:pacemaker:remote):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
 =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output with detail
 =#=#=#= Begin test: Complete brief text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * 1	(ocf:pacemaker:Dummy):	Active cluster02
   * 1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * 1/1	(lsb:exim):	Active cluster02
     * 1/1	(ocf:heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output
 =#=#=#= Begin test: Complete text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
       * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started
   * Node cluster02: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * dummy	(ocf:pacemaker:Dummy):	 Started
       * Public-IP	(ocf:heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
       * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started
   * GuestNode httpd-bundle-0@cluster01: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-1@cluster02: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-2@: OFFLINE:
     * Resources:
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output grouped by node
 =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
       * 1	(lsb:exim):	Active 
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0@cluster01: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
   * GuestNode httpd-bundle-1@cluster02: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node
 =#=#=#= Begin test: XML output grouped by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --group-by-node">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </node>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output grouped by node
 =#=#=#= Begin test: Complete text output filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by node
 =#=#=#= Begin test: XML output filtered by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as xml --include=all --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node
 =#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by tag
 =#=#=#= Begin test: XML output filtered by tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --node=even-nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </clone>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by tag
 =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by resource tag
 =#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource tag
 =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by node that doesn't exist
 =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes/>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
   </resources>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by node
 =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by primitive resource
 =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Fencing">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by primitive resource
 =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource
 =#=#=#= Begin test: XML output filtered by group resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=exim-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource
 =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource member
 =#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource member
 =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource
 =#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping-clone">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource
 =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource instance
 =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource instance
 =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by exact clone resource instance
 =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by exact clone resource instance
 =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by resource that doesn't exist
 =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources/>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by tag
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle resource
 =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by inactive bundle resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled IP address resource
 =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled IP address resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[1]
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled container
 =#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="2">
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled container
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle connection
 =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundle connection
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
     * Replica[1]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
     * Replica[2]
       * httpd	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled primitive resource
 =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled primitive resource
 =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by clone name in cloned group
 =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by clone name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by group name in cloned group
 =#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by group name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by primitive name in cloned group
 =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by primitive name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
   * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: Text output of partially active resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group (1 member inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
 =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources
 =#=#=#= Begin test: XML output of partially active resources =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="13" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <group id="partially-active-group" number_resources="2" managed="true" disabled="false">
       <resource id="dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of partially active resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources
 =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Full List of Resources:
   * 1/1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
     * 1/2	(ocf:pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01:
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output, with inactive resources
 =#=#=#= Begin test: Text output of partially active group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Active Resources:
   * Resource Group: partially-active-group (1 member inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
 =#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active group
 =#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Full List of Resources:
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active group, with inactive resources
 =#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Active Resources:
   * Resource Group: partially-active-group (1 member inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
 =#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of active member of partially active group
 =#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Active Resources:
   * Resource Group: partially-active-group (1 member inactive):
     * dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of inactive member of partially active group
 =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0@cluster02: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Inactive Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
     * 1/2	(ocf:pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01:
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node, with inactive resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Stopped cluster01
 =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources, filtered by node
  =#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="13" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
  =#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
  * Passed: crm_mon        - XML output of partially active resources, filtered by node
 =#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * GuestNode httpd-bundle-0@cluster01: maintenance
   * GuestNode httpd-bundle-1@cluster02: maintenance
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (unmanaged):
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (unmanaged)
     * ping	(ocf:pacemaker:ping):	 Started cluster01 (unmanaged)
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (unmanaged)
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (unmanaged)
-  * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
+  * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged, disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
-  * Resource Group: inactive-group (unmanaged) (disabled):
+  * Resource Group: inactive-group (unmanaged, disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled, unmanaged)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled, unmanaged)
   * Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01 (unmanaged)
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (unmanaged)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped (unmanaged)
   * Resource Group: exim-group (unmanaged):
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (unmanaged)
     * Email	(lsb:exim):	 Started cluster02 (unmanaged)
   * Clone Set: mysql-clone-group [mysql-group] (unmanaged):
     * Resource Group: mysql-group:0 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (unmanaged)
     * Resource Group: mysql-group:1 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (unmanaged)
-  * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged):
+  * Clone Set: promotable-clone [promotable-rsc] (promotable, unmanaged):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (unmanaged)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (unmanaged)
 =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance-mode enabled
diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary
index f0e4b9b0d1..00457a9c39 100644
--- a/cts/scheduler/summary/bug-1822.summary
+++ b/cts/scheduler/summary/bug-1822.summary
@@ -1,44 +1,44 @@
 Current cluster status:
   * Node List:
     * Online: [ process1a process2b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [ms-sf_group] (promotable) (unique):
+    * Clone Set: ms-sf [ms-sf_group] (promotable, unique):
       * Resource Group: ms-sf_group:0:
         * master_slave_Stateful:0	(ocf:heartbeat:Dummy-statful):	 Unpromoted process2b
         * master_slave_procdctl:0	(ocf:heartbeat:procdctl):	 Stopped
       * Resource Group: ms-sf_group:1:
         * master_slave_Stateful:1	(ocf:heartbeat:Dummy-statful):	 Promoted process1a
         * master_slave_procdctl:1	(ocf:heartbeat:procdctl):	 Promoted process1a
 
 Transition Summary:
   * Stop       master_slave_Stateful:1     ( Promoted process1a )  due to node availability
   * Stop       master_slave_procdctl:1     ( Promoted process1a )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   ms-sf_demote_0
   * Pseudo action:   ms-sf_group:1_demote_0
   * Resource action: master_slave_Stateful:1 demote on process1a
   * Resource action: master_slave_procdctl:1 demote on process1a
   * Pseudo action:   ms-sf_group:1_demoted_0
   * Pseudo action:   ms-sf_demoted_0
   * Pseudo action:   ms-sf_stop_0
   * Pseudo action:   ms-sf_group:1_stop_0
   * Resource action: master_slave_Stateful:1 stop on process1a
   * Resource action: master_slave_procdctl:1 stop on process1a
   * Cluster action:  do_shutdown on process1a
   * Pseudo action:   ms-sf_group:1_stopped_0
   * Pseudo action:   ms-sf_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ process1a process2b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [ms-sf_group] (promotable) (unique):
+    * Clone Set: ms-sf [ms-sf_group] (promotable, unique):
       * Resource Group: ms-sf_group:0:
         * master_slave_Stateful:0	(ocf:heartbeat:Dummy-statful):	 Unpromoted process2b
         * master_slave_procdctl:0	(ocf:heartbeat:procdctl):	 Stopped
       * Resource Group: ms-sf_group:1:
         * master_slave_Stateful:1	(ocf:heartbeat:Dummy-statful):	 Stopped
         * master_slave_procdctl:1	(ocf:heartbeat:procdctl):	 Stopped
diff --git a/cts/scheduler/summary/bug-5140-require-all-false.summary b/cts/scheduler/summary/bug-5140-require-all-false.summary
index 9623dd82da..a56fe6d6cc 100644
--- a/cts/scheduler/summary/bug-5140-require-all-false.summary
+++ b/cts/scheduler/summary/bug-5140-require-all-false.summary
@@ -1,83 +1,83 @@
 4 of 35 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
   * Node List:
     * Node hex-1: standby
     * Node hex-2: standby (with active resources)
     * Node hex-3: OFFLINE (standby)
 
   * Full List of Resources:
     * fencing	(stonith:external/sbd):	 Stopped
     * Clone Set: baseclone [basegrp]:
       * Resource Group: basegrp:0:
         * dlm	(ocf:pacemaker:controld):	 Started hex-2
         * clvmd	(ocf:lvm2:clvmd):	 Started hex-2
         * o2cb	(ocf:ocfs2:o2cb):	 Started hex-2
         * vg1	(ocf:heartbeat:LVM):	 Stopped
         * fs-ocfs-1	(ocf:heartbeat:Filesystem):	 Stopped
       * Stopped: [ hex-1 hex-3 ]
     * fs-xfs-1	(ocf:heartbeat:Filesystem):	 Stopped
     * Clone Set: fs2 [fs-ocfs-2]:
       * Stopped: [ hex-1 hex-2 hex-3 ]
-    * Clone Set: ms-r0 [drbd-r0] (promotable) (disabled):
+    * Clone Set: ms-r0 [drbd-r0] (promotable, disabled):
       * Stopped (disabled): [ hex-1 hex-2 hex-3 ]
-    * Clone Set: ms-r1 [drbd-r1] (promotable) (disabled):
+    * Clone Set: ms-r1 [drbd-r1] (promotable, disabled):
       * Stopped (disabled): [ hex-1 hex-2 hex-3 ]
     * Resource Group: md0-group:
       * md0	(ocf:heartbeat:Raid1):	 Stopped
       * vg-md0	(ocf:heartbeat:LVM):	 Stopped
       * fs-md0	(ocf:heartbeat:Filesystem):	 Stopped
       * dummy1	(ocf:heartbeat:Delay):	 Stopped
     * dummy3	(ocf:heartbeat:Delay):	 Stopped
     * dummy4	(ocf:heartbeat:Delay):	 Stopped
     * dummy5	(ocf:heartbeat:Delay):	 Stopped
     * dummy6	(ocf:heartbeat:Delay):	 Stopped
     * Resource Group: r0-group:
       * fs-r0	(ocf:heartbeat:Filesystem):	 Stopped
       * dummy2	(ocf:heartbeat:Delay):	 Stopped
     * cluster-md0	(ocf:heartbeat:Raid1):	 Stopped
 
 Transition Summary:
   * Stop       dlm:0       ( hex-2 )  due to node availability
   * Stop       clvmd:0     ( hex-2 )  due to node availability
   * Stop       o2cb:0      ( hex-2 )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   baseclone_stop_0
   * Pseudo action:   basegrp:0_stop_0
   * Resource action: o2cb            stop on hex-2
   * Resource action: clvmd           stop on hex-2
   * Resource action: dlm             stop on hex-2
   * Pseudo action:   basegrp:0_stopped_0
   * Pseudo action:   baseclone_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Node hex-1: standby
     * Node hex-2: standby
     * Node hex-3: OFFLINE (standby)
 
   * Full List of Resources:
     * fencing	(stonith:external/sbd):	 Stopped
     * Clone Set: baseclone [basegrp]:
       * Stopped: [ hex-1 hex-2 hex-3 ]
     * fs-xfs-1	(ocf:heartbeat:Filesystem):	 Stopped
     * Clone Set: fs2 [fs-ocfs-2]:
       * Stopped: [ hex-1 hex-2 hex-3 ]
-    * Clone Set: ms-r0 [drbd-r0] (promotable) (disabled):
+    * Clone Set: ms-r0 [drbd-r0] (promotable, disabled):
       * Stopped (disabled): [ hex-1 hex-2 hex-3 ]
-    * Clone Set: ms-r1 [drbd-r1] (promotable) (disabled):
+    * Clone Set: ms-r1 [drbd-r1] (promotable, disabled):
       * Stopped (disabled): [ hex-1 hex-2 hex-3 ]
     * Resource Group: md0-group:
       * md0	(ocf:heartbeat:Raid1):	 Stopped
       * vg-md0	(ocf:heartbeat:LVM):	 Stopped
       * fs-md0	(ocf:heartbeat:Filesystem):	 Stopped
       * dummy1	(ocf:heartbeat:Delay):	 Stopped
     * dummy3	(ocf:heartbeat:Delay):	 Stopped
     * dummy4	(ocf:heartbeat:Delay):	 Stopped
     * dummy5	(ocf:heartbeat:Delay):	 Stopped
     * dummy6	(ocf:heartbeat:Delay):	 Stopped
     * Resource Group: r0-group:
       * fs-r0	(ocf:heartbeat:Filesystem):	 Stopped
       * dummy2	(ocf:heartbeat:Delay):	 Stopped
     * cluster-md0	(ocf:heartbeat:Raid1):	 Stopped
diff --git a/cts/scheduler/summary/bug-lf-2358.summary b/cts/scheduler/summary/bug-lf-2358.summary
index 7c2c3d220b..b89aadc317 100644
--- a/cts/scheduler/summary/bug-lf-2358.summary
+++ b/cts/scheduler/summary/bug-lf-2358.summary
@@ -1,68 +1,68 @@
 2 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
   * Node List:
     * Online: [ alice.demo bob.demo ]
 
   * Full List of Resources:
-    * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled):
+    * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable, disabled):
       * Stopped (disabled): [ alice.demo bob.demo ]
     * Resource Group: rg_nfs:
       * res_fs_nfsexport	(ocf:heartbeat:Filesystem):	 Stopped
       * res_ip_nfs	(ocf:heartbeat:IPaddr2):	 Stopped
       * res_nfs	(lsb:nfs):	 Stopped
     * Resource Group: rg_mysql1:
       * res_fs_mysql1	(ocf:heartbeat:Filesystem):	 Started bob.demo
       * res_ip_mysql1	(ocf:heartbeat:IPaddr2):	 Started bob.demo
       * res_mysql1	(ocf:heartbeat:mysql):	 Started bob.demo
     * Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable):
       * Promoted: [ bob.demo ]
       * Stopped: [ alice.demo ]
     * Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable):
       * Promoted: [ alice.demo ]
       * Unpromoted: [ bob.demo ]
     * Resource Group: rg_mysql2:
       * res_fs_mysql2	(ocf:heartbeat:Filesystem):	 Started alice.demo
       * res_ip_mysql2	(ocf:heartbeat:IPaddr2):	 Started alice.demo
       * res_mysql2	(ocf:heartbeat:mysql):	 Started alice.demo
 
 Transition Summary:
   * Start      res_drbd_mysql1:1     ( alice.demo )
 
 Executing Cluster Transition:
   * Pseudo action:   ms_drbd_mysql1_pre_notify_start_0
   * Resource action: res_drbd_mysql1:0 notify on bob.demo
   * Pseudo action:   ms_drbd_mysql1_confirmed-pre_notify_start_0
   * Pseudo action:   ms_drbd_mysql1_start_0
   * Resource action: res_drbd_mysql1:1 start on alice.demo
   * Pseudo action:   ms_drbd_mysql1_running_0
   * Pseudo action:   ms_drbd_mysql1_post_notify_running_0
   * Resource action: res_drbd_mysql1:0 notify on bob.demo
   * Resource action: res_drbd_mysql1:1 notify on alice.demo
   * Pseudo action:   ms_drbd_mysql1_confirmed-post_notify_running_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ alice.demo bob.demo ]
 
   * Full List of Resources:
-    * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled):
+    * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable, disabled):
       * Stopped (disabled): [ alice.demo bob.demo ]
     * Resource Group: rg_nfs:
       * res_fs_nfsexport	(ocf:heartbeat:Filesystem):	 Stopped
       * res_ip_nfs	(ocf:heartbeat:IPaddr2):	 Stopped
       * res_nfs	(lsb:nfs):	 Stopped
     * Resource Group: rg_mysql1:
       * res_fs_mysql1	(ocf:heartbeat:Filesystem):	 Started bob.demo
       * res_ip_mysql1	(ocf:heartbeat:IPaddr2):	 Started bob.demo
       * res_mysql1	(ocf:heartbeat:mysql):	 Started bob.demo
     * Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable):
       * Promoted: [ bob.demo ]
       * Unpromoted: [ alice.demo ]
     * Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable):
       * Promoted: [ alice.demo ]
       * Unpromoted: [ bob.demo ]
     * Resource Group: rg_mysql2:
       * res_fs_mysql2	(ocf:heartbeat:Filesystem):	 Started alice.demo
       * res_ip_mysql2	(ocf:heartbeat:IPaddr2):	 Started alice.demo
       * res_mysql2	(ocf:heartbeat:mysql):	 Started alice.demo
diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary
index f638b3fc4f..7a9fc5c1b0 100644
--- a/cts/scheduler/summary/bug-pm-11.summary
+++ b/cts/scheduler/summary/bug-pm-11.summary
@@ -1,48 +1,48 @@
 Current cluster status:
   * Node List:
     * Online: [ node-a node-b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Stopped
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted node-a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Stopped
 
 Transition Summary:
   * Start      stateful-2:0     (                   node-b )
   * Promote    stateful-2:1     ( Stopped -> Promoted node-a )
 
 Executing Cluster Transition:
   * Resource action: stateful-2:0    monitor on node-b
   * Resource action: stateful-2:0    monitor on node-a
   * Resource action: stateful-2:1    monitor on node-b
   * Resource action: stateful-2:1    monitor on node-a
   * Pseudo action:   ms-sf_start_0
   * Pseudo action:   group:0_start_0
   * Resource action: stateful-2:0    start on node-b
   * Pseudo action:   group:1_start_0
   * Resource action: stateful-2:1    start on node-a
   * Pseudo action:   group:0_running_0
   * Pseudo action:   group:1_running_0
   * Pseudo action:   ms-sf_running_0
   * Pseudo action:   ms-sf_promote_0
   * Pseudo action:   group:1_promote_0
   * Resource action: stateful-2:1    promote on node-a
   * Pseudo action:   group:1_promoted_0
   * Pseudo action:   ms-sf_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node-a node-b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted node-a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Promoted node-a
diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary
index c4f3adb908..2b473e8b91 100644
--- a/cts/scheduler/summary/bug-pm-12.summary
+++ b/cts/scheduler/summary/bug-pm-12.summary
@@ -1,57 +1,57 @@
 Current cluster status:
   * Node List:
     * Online: [ node-a node-b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted node-a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Promoted node-a
 
 Transition Summary:
   * Restart    stateful-2:0     (  Unpromoted node-b )  due to resource definition change
   * Restart    stateful-2:1     ( Promoted node-a )  due to resource definition change
 
 Executing Cluster Transition:
   * Pseudo action:   ms-sf_demote_0
   * Pseudo action:   group:1_demote_0
   * Resource action: stateful-2:1    demote on node-a
   * Pseudo action:   group:1_demoted_0
   * Pseudo action:   ms-sf_demoted_0
   * Pseudo action:   ms-sf_stop_0
   * Pseudo action:   group:0_stop_0
   * Resource action: stateful-2:0    stop on node-b
   * Pseudo action:   group:1_stop_0
   * Resource action: stateful-2:1    stop on node-a
   * Pseudo action:   group:0_stopped_0
   * Pseudo action:   group:1_stopped_0
   * Pseudo action:   ms-sf_stopped_0
   * Pseudo action:   ms-sf_start_0
   * Pseudo action:   group:0_start_0
   * Resource action: stateful-2:0    start on node-b
   * Pseudo action:   group:1_start_0
   * Resource action: stateful-2:1    start on node-a
   * Pseudo action:   group:0_running_0
   * Pseudo action:   group:1_running_0
   * Pseudo action:   ms-sf_running_0
   * Pseudo action:   ms-sf_promote_0
   * Pseudo action:   group:1_promote_0
   * Resource action: stateful-2:1    promote on node-a
   * Pseudo action:   group:1_promoted_0
   * Pseudo action:   ms-sf_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node-a node-b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Unpromoted node-b
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted node-a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Promoted node-a
diff --git a/cts/scheduler/summary/group14.summary b/cts/scheduler/summary/group14.summary
index a1ba66a2e5..80ded38d78 100644
--- a/cts/scheduler/summary/group14.summary
+++ b/cts/scheduler/summary/group14.summary
@@ -1,102 +1,102 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n06 c001n07 ]
     * OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n06
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Stopped
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Stopped
     * migrator	(ocf:heartbeat:Dummy):	 Stopped
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Stopped
     * Clone Set: DoFencing [child_DoFencing]:
       * Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Stopped
 
 Transition Summary:
   * Start      DcIPaddr             ( c001n06 )  due to no quorum (blocked)
   * Stop       r192.168.100.181     ( c001n06 )  due to no quorum
   * Start      r192.168.100.182     ( c001n07 )  due to no quorum (blocked)
   * Start      r192.168.100.183     ( c001n07 )  due to no quorum (blocked)
   * Start      lsb_dummy            ( c001n06 )  due to no quorum (blocked)
   * Start      migrator             ( c001n06 )  due to no quorum (blocked)
   * Start      rsc_c001n03          ( c001n06 )  due to no quorum (blocked)
   * Start      rsc_c001n02          ( c001n07 )  due to no quorum (blocked)
   * Start      rsc_c001n04          ( c001n06 )  due to no quorum (blocked)
   * Start      rsc_c001n05          ( c001n07 )  due to no quorum (blocked)
   * Start      rsc_c001n06          ( c001n06 )  due to no quorum (blocked)
   * Start      rsc_c001n07          ( c001n07 )  due to no quorum (blocked)
   * Start      child_DoFencing:0    ( c001n06 )
   * Start      child_DoFencing:1    ( c001n07 )
   * Start      ocf_msdummy:0        ( c001n06 )  due to no quorum (blocked)
   * Start      ocf_msdummy:1        ( c001n07 )  due to no quorum (blocked)
   * Start      ocf_msdummy:2        ( c001n06 )  due to no quorum (blocked)
   * Start      ocf_msdummy:3        ( c001n07 )  due to no quorum (blocked)
 
 Executing Cluster Transition:
   * Pseudo action:   group-1_stop_0
   * Resource action: r192.168.100.181 stop on c001n06
   * Pseudo action:   DoFencing_start_0
   * Pseudo action:   group-1_stopped_0
   * Pseudo action:   group-1_start_0
   * Resource action: child_DoFencing:0 start on c001n06
   * Resource action: child_DoFencing:1 start on c001n07
   * Pseudo action:   DoFencing_running_0
   * Resource action: child_DoFencing:0 monitor=20000 on c001n06
   * Resource action: child_DoFencing:1 monitor=20000 on c001n07
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n06 c001n07 ]
     * OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Stopped
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Stopped
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Stopped
     * migrator	(ocf:heartbeat:Dummy):	 Stopped
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Stopped
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n06 c001n07 ]
       * Stopped: [ c001n02 c001n03 c001n04 c001n05 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Stopped
diff --git a/cts/scheduler/summary/inc11.summary b/cts/scheduler/summary/inc11.summary
index 51e838c374..256a10e8f7 100644
--- a/cts/scheduler/summary/inc11.summary
+++ b/cts/scheduler/summary/inc11.summary
@@ -1,43 +1,43 @@
 Current cluster status:
   * Node List:
     * Online: [ node0 node1 node2 ]
 
   * Full List of Resources:
     * simple-rsc	(ocf:heartbeat:apache):	 Stopped
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Start      simple-rsc     (                   node2 )
   * Start      child_rsc1:0   (                   node1 )
   * Promote    child_rsc1:1   ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
   * Resource action: simple-rsc      monitor on node2
   * Resource action: simple-rsc      monitor on node1
   * Resource action: simple-rsc      monitor on node0
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:0    monitor on node0
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:1    monitor on node0
   * Pseudo action:   rsc1_start_0
   * Resource action: simple-rsc      start on node2
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:1    promote on node2
   * Pseudo action:   rsc1_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node0 node1 node2 ]
 
   * Full List of Resources:
     * simple-rsc	(ocf:heartbeat:apache):	 Started node2
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Promoted node2
diff --git a/cts/scheduler/summary/inc12.summary b/cts/scheduler/summary/inc12.summary
index 1ada08dda0..2c93e2678c 100644
--- a/cts/scheduler/summary/inc12.summary
+++ b/cts/scheduler/summary/inc12.summary
@@ -1,132 +1,132 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n04
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ]
       * Stopped: [ c001n03 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Unpromoted c001n04
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Unpromoted c001n04
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Unpromoted c001n05
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Unpromoted c001n05
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Unpromoted c001n06
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Unpromoted c001n06
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Unpromoted c001n07
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Unpromoted c001n07
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Unpromoted c001n02
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Unpromoted c001n02
 
 Transition Summary:
   * Stop       ocf_192.168.100.181           (       c001n02 )  due to node availability
   * Stop       heartbeat_192.168.100.182     (       c001n02 )  due to node availability
   * Stop       ocf_192.168.100.183           (       c001n02 )  due to node availability
   * Stop       lsb_dummy                     (       c001n04 )  due to node availability
   * Stop       rsc_c001n03                   (       c001n05 )  due to node availability
   * Stop       rsc_c001n02                   (       c001n02 )  due to node availability
   * Stop       rsc_c001n04                   (       c001n04 )  due to node availability
   * Stop       rsc_c001n05                   (       c001n05 )  due to node availability
   * Stop       rsc_c001n06                   (       c001n06 )  due to node availability
   * Stop       rsc_c001n07                   (       c001n07 )  due to node availability
   * Stop       child_DoFencing:0             (       c001n02 )  due to node availability
   * Stop       child_DoFencing:1             (       c001n04 )  due to node availability
   * Stop       child_DoFencing:2             (       c001n05 )  due to node availability
   * Stop       child_DoFencing:3             (       c001n06 )  due to node availability
   * Stop       child_DoFencing:4             (       c001n07 )  due to node availability
   * Stop       ocf_msdummy:2                 ( Unpromoted c001n04 )  due to node availability
   * Stop       ocf_msdummy:3                 ( Unpromoted c001n04 )  due to node availability
   * Stop       ocf_msdummy:4                 ( Unpromoted c001n05 )  due to node availability
   * Stop       ocf_msdummy:5                 ( Unpromoted c001n05 )  due to node availability
   * Stop       ocf_msdummy:6                 ( Unpromoted c001n06 )  due to node availability
   * Stop       ocf_msdummy:7                 ( Unpromoted c001n06 )  due to node availability
   * Stop       ocf_msdummy:8                 ( Unpromoted c001n07 )  due to node availability
   * Stop       ocf_msdummy:9                 ( Unpromoted c001n07 )  due to node availability
   * Stop       ocf_msdummy:10                ( Unpromoted c001n02 )  due to node availability
   * Stop       ocf_msdummy:11                ( Unpromoted c001n02 )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   group-1_stop_0
   * Resource action: ocf_192.168.100.183 stop on c001n02
   * Resource action: lsb_dummy       stop on c001n04
   * Resource action: rsc_c001n03     stop on c001n05
   * Resource action: rsc_c001n02     stop on c001n02
   * Resource action: rsc_c001n04     stop on c001n04
   * Resource action: rsc_c001n05     stop on c001n05
   * Resource action: rsc_c001n06     stop on c001n06
   * Resource action: rsc_c001n07     stop on c001n07
   * Pseudo action:   DoFencing_stop_0
   * Pseudo action:   master_rsc_1_stop_0
   * Resource action: heartbeat_192.168.100.182 stop on c001n02
   * Resource action: child_DoFencing:1 stop on c001n02
   * Resource action: child_DoFencing:2 stop on c001n04
   * Resource action: child_DoFencing:3 stop on c001n05
   * Resource action: child_DoFencing:4 stop on c001n06
   * Resource action: child_DoFencing:5 stop on c001n07
   * Pseudo action:   DoFencing_stopped_0
   * Resource action: ocf_msdummy:2   stop on c001n04
   * Resource action: ocf_msdummy:3   stop on c001n04
   * Resource action: ocf_msdummy:4   stop on c001n05
   * Resource action: ocf_msdummy:5   stop on c001n05
   * Resource action: ocf_msdummy:6   stop on c001n06
   * Resource action: ocf_msdummy:7   stop on c001n06
   * Resource action: ocf_msdummy:8   stop on c001n07
   * Resource action: ocf_msdummy:9   stop on c001n07
   * Resource action: ocf_msdummy:10  stop on c001n02
   * Resource action: ocf_msdummy:11  stop on c001n02
   * Pseudo action:   master_rsc_1_stopped_0
   * Cluster action:  do_shutdown on c001n07
   * Cluster action:  do_shutdown on c001n06
   * Cluster action:  do_shutdown on c001n05
   * Cluster action:  do_shutdown on c001n04
   * Resource action: ocf_192.168.100.181 stop on c001n02
   * Cluster action:  do_shutdown on c001n02
   * Pseudo action:   group-1_stopped_0
   * Cluster action:  do_shutdown on c001n03
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Stopped
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Stopped
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Stopped
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Stopped
     * Clone Set: DoFencing [child_DoFencing]:
       * Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Stopped
diff --git a/cts/scheduler/summary/managed-1.summary b/cts/scheduler/summary/managed-1.summary
index 1c417a4fb5..9c25080237 100644
--- a/cts/scheduler/summary/managed-1.summary
+++ b/cts/scheduler/summary/managed-1.summary
@@ -1,132 +1,132 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n09	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
-    * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+    * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n02 (unmanaged)
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03 (unmanaged)
       * child_DoFencing:2	(stonith:ssh):	 Started c001n04 (unmanaged)
       * child_DoFencing:3	(stonith:ssh):	 Started c001n05 (unmanaged)
       * child_DoFencing:4	(stonith:ssh):	 Started c001n06 (unmanaged)
       * child_DoFencing:5	(stonith:ssh):	 Started c001n07 (unmanaged)
       * child_DoFencing:6	(stonith:ssh):	 Started c001n08 (unmanaged)
       * child_DoFencing:7	(stonith:ssh):	 Started c001n09 (unmanaged)
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: DcIPaddr        monitor on c001n08
   * Resource action: DcIPaddr        monitor on c001n07
   * Resource action: DcIPaddr        monitor on c001n06
   * Resource action: DcIPaddr        monitor on c001n05
   * Resource action: DcIPaddr        monitor on c001n04
   * Resource action: DcIPaddr        monitor on c001n03
   * Resource action: DcIPaddr        monitor on c001n02
   * Resource action: rsc_c001n09     monitor on c001n08
   * Resource action: rsc_c001n09     monitor on c001n07
   * Resource action: rsc_c001n09     monitor on c001n05
   * Resource action: rsc_c001n09     monitor on c001n04
   * Resource action: rsc_c001n09     monitor on c001n03
   * Resource action: rsc_c001n09     monitor on c001n02
   * Resource action: rsc_c001n02     monitor on c001n09
   * Resource action: rsc_c001n02     monitor on c001n08
   * Resource action: rsc_c001n02     monitor on c001n07
   * Resource action: rsc_c001n02     monitor on c001n05
   * Resource action: rsc_c001n02     monitor on c001n04
   * Resource action: rsc_c001n03     monitor on c001n09
   * Resource action: rsc_c001n03     monitor on c001n08
   * Resource action: rsc_c001n03     monitor on c001n07
   * Resource action: rsc_c001n03     monitor on c001n05
   * Resource action: rsc_c001n03     monitor on c001n04
   * Resource action: rsc_c001n03     monitor on c001n02
   * Resource action: rsc_c001n04     monitor on c001n09
   * Resource action: rsc_c001n04     monitor on c001n08
   * Resource action: rsc_c001n04     monitor on c001n07
   * Resource action: rsc_c001n04     monitor on c001n05
   * Resource action: rsc_c001n04     monitor on c001n03
   * Resource action: rsc_c001n04     monitor on c001n02
   * Resource action: rsc_c001n05     monitor on c001n09
   * Resource action: rsc_c001n05     monitor on c001n08
   * Resource action: rsc_c001n05     monitor on c001n07
   * Resource action: rsc_c001n05     monitor on c001n06
   * Resource action: rsc_c001n05     monitor on c001n04
   * Resource action: rsc_c001n05     monitor on c001n03
   * Resource action: rsc_c001n05     monitor on c001n02
   * Resource action: rsc_c001n06     monitor on c001n09
   * Resource action: rsc_c001n06     monitor on c001n08
   * Resource action: rsc_c001n06     monitor on c001n07
   * Resource action: rsc_c001n06     monitor on c001n05
   * Resource action: rsc_c001n06     monitor on c001n04
   * Resource action: rsc_c001n06     monitor on c001n03
   * Resource action: rsc_c001n07     monitor on c001n09
   * Resource action: rsc_c001n07     monitor on c001n08
   * Resource action: rsc_c001n07     monitor on c001n06
   * Resource action: rsc_c001n07     monitor on c001n05
   * Resource action: rsc_c001n07     monitor on c001n04
   * Resource action: rsc_c001n08     monitor on c001n09
   * Resource action: rsc_c001n08     monitor on c001n07
   * Resource action: rsc_c001n08     monitor on c001n05
   * Resource action: child_DoFencing:0 monitor on c001n09
   * Resource action: child_DoFencing:0 monitor on c001n08
   * Resource action: child_DoFencing:0 monitor on c001n07
   * Resource action: child_DoFencing:1 monitor on c001n08
   * Resource action: child_DoFencing:1 monitor on c001n07
   * Resource action: child_DoFencing:1 monitor on c001n02
   * Resource action: child_DoFencing:2 monitor on c001n09
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n07
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n08
   * Resource action: child_DoFencing:3 monitor on c001n04
   * Resource action: child_DoFencing:3 monitor on c001n02
   * Resource action: child_DoFencing:4 monitor on c001n09
   * Resource action: child_DoFencing:4 monitor on c001n05
   * Resource action: child_DoFencing:4 monitor on c001n03
   * Resource action: child_DoFencing:5 monitor on c001n08
   * Resource action: child_DoFencing:5 monitor on c001n05
   * Resource action: child_DoFencing:5 monitor on c001n04
   * Resource action: child_DoFencing:5 monitor on c001n02
   * Resource action: child_DoFencing:6 monitor on c001n09
   * Resource action: child_DoFencing:6 monitor on c001n07
   * Resource action: child_DoFencing:6 monitor on c001n05
   * Resource action: child_DoFencing:6 monitor on c001n04
   * Resource action: child_DoFencing:7 monitor on c001n08
   * Resource action: child_DoFencing:7 monitor on c001n07
   * Resource action: child_DoFencing:7 monitor on c001n05
   * Resource action: child_DoFencing:7 monitor on c001n04
   * Resource action: child_DoFencing:7 monitor on c001n03
   * Resource action: child_DoFencing:7 monitor on c001n02
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n09	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
-    * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+    * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n02 (unmanaged)
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03 (unmanaged)
       * child_DoFencing:2	(stonith:ssh):	 Started c001n04 (unmanaged)
       * child_DoFencing:3	(stonith:ssh):	 Started c001n05 (unmanaged)
       * child_DoFencing:4	(stonith:ssh):	 Started c001n06 (unmanaged)
       * child_DoFencing:5	(stonith:ssh):	 Started c001n07 (unmanaged)
       * child_DoFencing:6	(stonith:ssh):	 Started c001n08 (unmanaged)
       * child_DoFencing:7	(stonith:ssh):	 Started c001n09 (unmanaged)
diff --git a/cts/scheduler/summary/managed-2.summary b/cts/scheduler/summary/managed-2.summary
index a1d327c3da..dd0a1870b8 100644
--- a/cts/scheduler/summary/managed-2.summary
+++ b/cts/scheduler/summary/managed-2.summary
@@ -1,166 +1,166 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n09	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
-    * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+    * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
       * child_DoFencing:0	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:1	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:2	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:3	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:4	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:5	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:6	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:7	(stonith:ssh):	 Stopped (unmanaged)
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: DcIPaddr        monitor on c001n08
   * Resource action: DcIPaddr        monitor on c001n07
   * Resource action: DcIPaddr        monitor on c001n06
   * Resource action: DcIPaddr        monitor on c001n05
   * Resource action: DcIPaddr        monitor on c001n04
   * Resource action: DcIPaddr        monitor on c001n03
   * Resource action: DcIPaddr        monitor on c001n02
   * Resource action: rsc_c001n09     monitor on c001n08
   * Resource action: rsc_c001n09     monitor on c001n07
   * Resource action: rsc_c001n09     monitor on c001n05
   * Resource action: rsc_c001n09     monitor on c001n04
   * Resource action: rsc_c001n09     monitor on c001n03
   * Resource action: rsc_c001n09     monitor on c001n02
   * Resource action: rsc_c001n02     monitor on c001n09
   * Resource action: rsc_c001n02     monitor on c001n08
   * Resource action: rsc_c001n02     monitor on c001n07
   * Resource action: rsc_c001n02     monitor on c001n05
   * Resource action: rsc_c001n02     monitor on c001n04
   * Resource action: rsc_c001n03     monitor on c001n09
   * Resource action: rsc_c001n03     monitor on c001n08
   * Resource action: rsc_c001n03     monitor on c001n07
   * Resource action: rsc_c001n03     monitor on c001n05
   * Resource action: rsc_c001n03     monitor on c001n04
   * Resource action: rsc_c001n03     monitor on c001n02
   * Resource action: rsc_c001n04     monitor on c001n09
   * Resource action: rsc_c001n04     monitor on c001n08
   * Resource action: rsc_c001n04     monitor on c001n07
   * Resource action: rsc_c001n04     monitor on c001n05
   * Resource action: rsc_c001n04     monitor on c001n03
   * Resource action: rsc_c001n04     monitor on c001n02
   * Resource action: rsc_c001n05     monitor on c001n09
   * Resource action: rsc_c001n05     monitor on c001n08
   * Resource action: rsc_c001n05     monitor on c001n07
   * Resource action: rsc_c001n05     monitor on c001n06
   * Resource action: rsc_c001n05     monitor on c001n04
   * Resource action: rsc_c001n05     monitor on c001n03
   * Resource action: rsc_c001n05     monitor on c001n02
   * Resource action: rsc_c001n06     monitor on c001n09
   * Resource action: rsc_c001n06     monitor on c001n08
   * Resource action: rsc_c001n06     monitor on c001n07
   * Resource action: rsc_c001n06     monitor on c001n05
   * Resource action: rsc_c001n06     monitor on c001n04
   * Resource action: rsc_c001n06     monitor on c001n03
   * Resource action: rsc_c001n07     monitor on c001n09
   * Resource action: rsc_c001n07     monitor on c001n08
   * Resource action: rsc_c001n07     monitor on c001n06
   * Resource action: rsc_c001n07     monitor on c001n05
   * Resource action: rsc_c001n07     monitor on c001n04
   * Resource action: rsc_c001n08     monitor on c001n09
   * Resource action: rsc_c001n08     monitor on c001n07
   * Resource action: rsc_c001n08     monitor on c001n05
   * Resource action: child_DoFencing:0 monitor on c001n09
   * Resource action: child_DoFencing:0 monitor on c001n08
   * Resource action: child_DoFencing:0 monitor on c001n07
   * Resource action: child_DoFencing:0 monitor on c001n06
   * Resource action: child_DoFencing:0 monitor on c001n05
   * Resource action: child_DoFencing:0 monitor on c001n04
   * Resource action: child_DoFencing:0 monitor on c001n03
   * Resource action: child_DoFencing:0 monitor on c001n02
   * Resource action: child_DoFencing:1 monitor on c001n09
   * Resource action: child_DoFencing:1 monitor on c001n08
   * Resource action: child_DoFencing:1 monitor on c001n07
   * Resource action: child_DoFencing:1 monitor on c001n06
   * Resource action: child_DoFencing:1 monitor on c001n05
   * Resource action: child_DoFencing:1 monitor on c001n04
   * Resource action: child_DoFencing:1 monitor on c001n03
   * Resource action: child_DoFencing:1 monitor on c001n02
   * Resource action: child_DoFencing:2 monitor on c001n09
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n07
   * Resource action: child_DoFencing:2 monitor on c001n06
   * Resource action: child_DoFencing:2 monitor on c001n05
   * Resource action: child_DoFencing:2 monitor on c001n04
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:2 monitor on c001n02
   * Resource action: child_DoFencing:3 monitor on c001n09
   * Resource action: child_DoFencing:3 monitor on c001n08
   * Resource action: child_DoFencing:3 monitor on c001n07
   * Resource action: child_DoFencing:3 monitor on c001n06
   * Resource action: child_DoFencing:3 monitor on c001n05
   * Resource action: child_DoFencing:3 monitor on c001n04
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n02
   * Resource action: child_DoFencing:4 monitor on c001n09
   * Resource action: child_DoFencing:4 monitor on c001n08
   * Resource action: child_DoFencing:4 monitor on c001n07
   * Resource action: child_DoFencing:4 monitor on c001n06
   * Resource action: child_DoFencing:4 monitor on c001n05
   * Resource action: child_DoFencing:4 monitor on c001n04
   * Resource action: child_DoFencing:4 monitor on c001n03
   * Resource action: child_DoFencing:4 monitor on c001n02
   * Resource action: child_DoFencing:5 monitor on c001n09
   * Resource action: child_DoFencing:5 monitor on c001n08
   * Resource action: child_DoFencing:5 monitor on c001n07
   * Resource action: child_DoFencing:5 monitor on c001n06
   * Resource action: child_DoFencing:5 monitor on c001n05
   * Resource action: child_DoFencing:5 monitor on c001n04
   * Resource action: child_DoFencing:5 monitor on c001n03
   * Resource action: child_DoFencing:5 monitor on c001n02
   * Resource action: child_DoFencing:6 monitor on c001n09
   * Resource action: child_DoFencing:6 monitor on c001n08
   * Resource action: child_DoFencing:6 monitor on c001n07
   * Resource action: child_DoFencing:6 monitor on c001n06
   * Resource action: child_DoFencing:6 monitor on c001n05
   * Resource action: child_DoFencing:6 monitor on c001n04
   * Resource action: child_DoFencing:6 monitor on c001n03
   * Resource action: child_DoFencing:6 monitor on c001n02
   * Resource action: child_DoFencing:7 monitor on c001n09
   * Resource action: child_DoFencing:7 monitor on c001n08
   * Resource action: child_DoFencing:7 monitor on c001n07
   * Resource action: child_DoFencing:7 monitor on c001n06
   * Resource action: child_DoFencing:7 monitor on c001n05
   * Resource action: child_DoFencing:7 monitor on c001n04
   * Resource action: child_DoFencing:7 monitor on c001n03
   * Resource action: child_DoFencing:7 monitor on c001n02
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n09	(ocf:heartbeat:IPaddr):	 Started c001n09
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
-    * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+    * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
       * child_DoFencing:0	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:1	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:2	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:3	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:4	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:5	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:6	(stonith:ssh):	 Stopped (unmanaged)
       * child_DoFencing:7	(stonith:ssh):	 Stopped (unmanaged)
diff --git a/cts/scheduler/summary/promoted-0.summary b/cts/scheduler/summary/promoted-0.summary
index b80bb106cf..3e724ffdc4 100644
--- a/cts/scheduler/summary/promoted-0.summary
+++ b/cts/scheduler/summary/promoted-0.summary
@@ -1,47 +1,47 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:2	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:3	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Start      child_rsc1:0     ( node1 )
   * Start      child_rsc1:1     ( node2 )
   * Start      child_rsc1:2     ( node1 )
   * Start      child_rsc1:3     ( node2 )
 
 Executing Cluster Transition:
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:2    monitor on node2
   * Resource action: child_rsc1:2    monitor on node1
   * Resource action: child_rsc1:3    monitor on node2
   * Resource action: child_rsc1:3    monitor on node1
   * Resource action: child_rsc1:4    monitor on node2
   * Resource action: child_rsc1:4    monitor on node1
   * Pseudo action:   rsc1_start_0
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Resource action: child_rsc1:2    start on node1
   * Resource action: child_rsc1:3    start on node2
   * Pseudo action:   rsc1_running_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:2	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:3	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
diff --git a/cts/scheduler/summary/promoted-1.summary b/cts/scheduler/summary/promoted-1.summary
index 161f51834a..08100f3e36 100644
--- a/cts/scheduler/summary/promoted-1.summary
+++ b/cts/scheduler/summary/promoted-1.summary
@@ -1,50 +1,50 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:2	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:3	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Start      child_rsc1:0     (                   node1 )
   * Promote    child_rsc1:1     ( Stopped -> Promoted node2 )
   * Start      child_rsc1:2     (                   node1 )
   * Start      child_rsc1:3     (                   node2 )
 
 Executing Cluster Transition:
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:2    monitor on node2
   * Resource action: child_rsc1:2    monitor on node1
   * Resource action: child_rsc1:3    monitor on node2
   * Resource action: child_rsc1:3    monitor on node1
   * Resource action: child_rsc1:4    monitor on node2
   * Resource action: child_rsc1:4    monitor on node1
   * Pseudo action:   rsc1_start_0
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Resource action: child_rsc1:2    start on node1
   * Resource action: child_rsc1:3    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:1    promote on node2
   * Pseudo action:   rsc1_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Promoted node2
       * child_rsc1:2	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:3	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
diff --git a/cts/scheduler/summary/promoted-10.summary b/cts/scheduler/summary/promoted-10.summary
index 54dbcd7e69..c35c61c793 100644
--- a/cts/scheduler/summary/promoted-10.summary
+++ b/cts/scheduler/summary/promoted-10.summary
@@ -1,75 +1,75 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:2	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:3	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Promote    child_rsc1:0     ( Stopped -> Promoted node1 )
   * Start      child_rsc1:1     (                   node2 )
   * Start      child_rsc1:2     (                   node1 )
   * Promote    child_rsc1:3     ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:2    monitor on node2
   * Resource action: child_rsc1:2    monitor on node1
   * Resource action: child_rsc1:3    monitor on node2
   * Resource action: child_rsc1:3    monitor on node1
   * Resource action: child_rsc1:4    monitor on node2
   * Resource action: child_rsc1:4    monitor on node1
   * Pseudo action:   rsc1_pre_notify_start_0
   * Pseudo action:   rsc1_confirmed-pre_notify_start_0
   * Pseudo action:   rsc1_start_0
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Resource action: child_rsc1:2    start on node1
   * Resource action: child_rsc1:3    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_post_notify_running_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-post_notify_running_0
   * Pseudo action:   rsc1_pre_notify_promote_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-pre_notify_promote_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:0    promote on node1
   * Resource action: child_rsc1:3    promote on node2
   * Pseudo action:   rsc1_promoted_0
   * Pseudo action:   rsc1_post_notify_promoted_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-post_notify_promoted_0
   * Resource action: child_rsc1:0    monitor=11000 on node1
   * Resource action: child_rsc1:1    monitor=1000 on node2
   * Resource action: child_rsc1:2    monitor=1000 on node1
   * Resource action: child_rsc1:3    monitor=11000 on node2
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Promoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:2	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:3	(ocf:heartbeat:apache):	 Promoted node2
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
diff --git a/cts/scheduler/summary/promoted-11.summary b/cts/scheduler/summary/promoted-11.summary
index 89fb85776d..47732fb9da 100644
--- a/cts/scheduler/summary/promoted-11.summary
+++ b/cts/scheduler/summary/promoted-11.summary
@@ -1,40 +1,40 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * simple-rsc	(ocf:heartbeat:apache):	 Stopped
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Start      simple-rsc     (                   node2 )
   * Start      child_rsc1:0   (                   node1 )
   * Promote    child_rsc1:1   ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
   * Resource action: simple-rsc      monitor on node2
   * Resource action: simple-rsc      monitor on node1
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Pseudo action:   rsc1_start_0
   * Resource action: simple-rsc      start on node2
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:1    promote on node2
   * Pseudo action:   rsc1_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
     * simple-rsc	(ocf:heartbeat:apache):	 Started node2
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Promoted node2
diff --git a/cts/scheduler/summary/promoted-12.summary b/cts/scheduler/summary/promoted-12.summary
index 878d366883..9125a9aa28 100644
--- a/cts/scheduler/summary/promoted-12.summary
+++ b/cts/scheduler/summary/promoted-12.summary
@@ -1,33 +1,33 @@
 Current cluster status:
   * Node List:
     * Online: [ sel3 sel4 ]
 
   * Full List of Resources:
     * Clone Set: ms-drbd0 [drbd0] (promotable):
       * Promoted: [ sel3 ]
       * Unpromoted: [ sel4 ]
-    * Clone Set: ms-sf [sf] (promotable) (unique):
+    * Clone Set: ms-sf [sf] (promotable, unique):
       * sf:0	(ocf:heartbeat:Stateful):	 Unpromoted sel3
       * sf:1	(ocf:heartbeat:Stateful):	 Unpromoted sel4
     * fs0	(ocf:heartbeat:Filesystem):	 Started sel3
 
 Transition Summary:
   * Promote    sf:0    ( Unpromoted -> Promoted sel3 )
 
 Executing Cluster Transition:
   * Pseudo action:   ms-sf_promote_0
   * Resource action: sf:0            promote on sel3
   * Pseudo action:   ms-sf_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ sel3 sel4 ]
 
   * Full List of Resources:
     * Clone Set: ms-drbd0 [drbd0] (promotable):
       * Promoted: [ sel3 ]
       * Unpromoted: [ sel4 ]
-    * Clone Set: ms-sf [sf] (promotable) (unique):
+    * Clone Set: ms-sf [sf] (promotable, unique):
       * sf:0	(ocf:heartbeat:Stateful):	 Promoted sel3
       * sf:1	(ocf:heartbeat:Stateful):	 Unpromoted sel4
     * fs0	(ocf:heartbeat:Filesystem):	 Started sel3
diff --git a/cts/scheduler/summary/promoted-2.summary b/cts/scheduler/summary/promoted-2.summary
index 3258499fc8..9adf43ef1d 100644
--- a/cts/scheduler/summary/promoted-2.summary
+++ b/cts/scheduler/summary/promoted-2.summary
@@ -1,71 +1,71 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:2	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:3	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Promote    child_rsc1:0     ( Stopped -> Promoted node1 )
   * Start      child_rsc1:1     (                   node2 )
   * Start      child_rsc1:2     (                   node1 )
   * Promote    child_rsc1:3     ( Stopped -> Promoted node2 )
 
 Executing Cluster Transition:
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:2    monitor on node2
   * Resource action: child_rsc1:2    monitor on node1
   * Resource action: child_rsc1:3    monitor on node2
   * Resource action: child_rsc1:3    monitor on node1
   * Resource action: child_rsc1:4    monitor on node2
   * Resource action: child_rsc1:4    monitor on node1
   * Pseudo action:   rsc1_pre_notify_start_0
   * Pseudo action:   rsc1_confirmed-pre_notify_start_0
   * Pseudo action:   rsc1_start_0
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Resource action: child_rsc1:2    start on node1
   * Resource action: child_rsc1:3    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_post_notify_running_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-post_notify_running_0
   * Pseudo action:   rsc1_pre_notify_promote_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-pre_notify_promote_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:0    promote on node1
   * Resource action: child_rsc1:3    promote on node2
   * Pseudo action:   rsc1_promoted_0
   * Pseudo action:   rsc1_post_notify_promoted_0
   * Resource action: child_rsc1:0    notify on node1
   * Resource action: child_rsc1:1    notify on node2
   * Resource action: child_rsc1:2    notify on node1
   * Resource action: child_rsc1:3    notify on node2
   * Pseudo action:   rsc1_confirmed-post_notify_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Promoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:2	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:3	(ocf:heartbeat:apache):	 Promoted node2
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
diff --git a/cts/scheduler/summary/promoted-3.summary b/cts/scheduler/summary/promoted-3.summary
index 161f51834a..08100f3e36 100644
--- a/cts/scheduler/summary/promoted-3.summary
+++ b/cts/scheduler/summary/promoted-3.summary
@@ -1,50 +1,50 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:1	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:2	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:3	(ocf:heartbeat:apache):	 Stopped
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
 
 Transition Summary:
   * Start      child_rsc1:0     (                   node1 )
   * Promote    child_rsc1:1     ( Stopped -> Promoted node2 )
   * Start      child_rsc1:2     (                   node1 )
   * Start      child_rsc1:3     (                   node2 )
 
 Executing Cluster Transition:
   * Resource action: child_rsc1:0    monitor on node2
   * Resource action: child_rsc1:0    monitor on node1
   * Resource action: child_rsc1:1    monitor on node2
   * Resource action: child_rsc1:1    monitor on node1
   * Resource action: child_rsc1:2    monitor on node2
   * Resource action: child_rsc1:2    monitor on node1
   * Resource action: child_rsc1:3    monitor on node2
   * Resource action: child_rsc1:3    monitor on node1
   * Resource action: child_rsc1:4    monitor on node2
   * Resource action: child_rsc1:4    monitor on node1
   * Pseudo action:   rsc1_start_0
   * Resource action: child_rsc1:0    start on node1
   * Resource action: child_rsc1:1    start on node2
   * Resource action: child_rsc1:2    start on node1
   * Resource action: child_rsc1:3    start on node2
   * Pseudo action:   rsc1_running_0
   * Pseudo action:   rsc1_promote_0
   * Resource action: child_rsc1:1    promote on node2
   * Pseudo action:   rsc1_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+    * Clone Set: rsc1 [child_rsc1] (promotable, unique):
       * child_rsc1:0	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:1	(ocf:heartbeat:apache):	 Promoted node2
       * child_rsc1:2	(ocf:heartbeat:apache):	 Unpromoted node1
       * child_rsc1:3	(ocf:heartbeat:apache):	 Unpromoted node2
       * child_rsc1:4	(ocf:heartbeat:apache):	 Stopped
diff --git a/cts/scheduler/summary/promoted-4.summary b/cts/scheduler/summary/promoted-4.summary
index 0dfe7c7263..2bcb25eaf1 100644
--- a/cts/scheduler/summary/promoted-4.summary
+++ b/cts/scheduler/summary/promoted-4.summary
@@ -1,94 +1,94 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_child	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_child	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n01
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n01
       * child_DoFencing:3	(stonith:ssh):	 Started c001n02
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
 
 Transition Summary:
   * Promote    ocf_msdummy:0     ( Unpromoted -> Promoted c001n08 )
 
 Executing Cluster Transition:
   * Resource action: child_DoFencing:1 monitor on c001n08
   * Resource action: child_DoFencing:1 monitor on c001n02
   * Resource action: child_DoFencing:1 monitor on c001n01
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:2 monitor on c001n02
   * Resource action: child_DoFencing:3 monitor on c001n08
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n01
   * Resource action: ocf_msdummy:0   cancel=5000 on c001n08
   * Resource action: ocf_msdummy:2   monitor on c001n08
   * Resource action: ocf_msdummy:2   monitor on c001n03
   * Resource action: ocf_msdummy:2   monitor on c001n02
   * Resource action: ocf_msdummy:3   monitor on c001n03
   * Resource action: ocf_msdummy:3   monitor on c001n02
   * Resource action: ocf_msdummy:3   monitor on c001n01
   * Resource action: ocf_msdummy:4   monitor on c001n08
   * Resource action: ocf_msdummy:4   monitor on c001n02
   * Resource action: ocf_msdummy:4   monitor on c001n01
   * Resource action: ocf_msdummy:5   monitor on c001n08
   * Resource action: ocf_msdummy:5   monitor on c001n03
   * Resource action: ocf_msdummy:5   monitor on c001n02
   * Resource action: ocf_msdummy:6   monitor on c001n08
   * Resource action: ocf_msdummy:6   monitor on c001n03
   * Resource action: ocf_msdummy:6   monitor on c001n01
   * Resource action: ocf_msdummy:7   monitor on c001n08
   * Resource action: ocf_msdummy:7   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n01
   * Pseudo action:   master_rsc_1_promote_0
   * Resource action: ocf_msdummy:0   promote on c001n08
   * Pseudo action:   master_rsc_1_promoted_0
   * Resource action: ocf_msdummy:0   monitor=6000 on c001n08
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_child	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_child	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n01
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n01
       * child_DoFencing:3	(stonith:ssh):	 Started c001n02
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
diff --git a/cts/scheduler/summary/promoted-5.summary b/cts/scheduler/summary/promoted-5.summary
index 00fa1c2154..8a2f1a232f 100644
--- a/cts/scheduler/summary/promoted-5.summary
+++ b/cts/scheduler/summary/promoted-5.summary
@@ -1,88 +1,88 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_child	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_child	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n01
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n01
       * child_DoFencing:3	(stonith:ssh):	 Started c001n02
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: child_DoFencing:1 monitor on c001n08
   * Resource action: child_DoFencing:1 monitor on c001n02
   * Resource action: child_DoFencing:1 monitor on c001n01
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:2 monitor on c001n02
   * Resource action: child_DoFencing:3 monitor on c001n08
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n01
   * Resource action: ocf_msdummy:2   monitor on c001n08
   * Resource action: ocf_msdummy:2   monitor on c001n03
   * Resource action: ocf_msdummy:2   monitor on c001n02
   * Resource action: ocf_msdummy:3   monitor on c001n03
   * Resource action: ocf_msdummy:3   monitor on c001n02
   * Resource action: ocf_msdummy:3   monitor on c001n01
   * Resource action: ocf_msdummy:4   monitor on c001n08
   * Resource action: ocf_msdummy:4   monitor on c001n02
   * Resource action: ocf_msdummy:4   monitor on c001n01
   * Resource action: ocf_msdummy:5   monitor on c001n08
   * Resource action: ocf_msdummy:5   monitor on c001n03
   * Resource action: ocf_msdummy:5   monitor on c001n02
   * Resource action: ocf_msdummy:6   monitor on c001n08
   * Resource action: ocf_msdummy:6   monitor on c001n03
   * Resource action: ocf_msdummy:6   monitor on c001n01
   * Resource action: ocf_msdummy:7   monitor on c001n08
   * Resource action: ocf_msdummy:7   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n01
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_child	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_child	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n01
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n01
       * child_DoFencing:3	(stonith:ssh):	 Started c001n02
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
diff --git a/cts/scheduler/summary/promoted-6.summary b/cts/scheduler/summary/promoted-6.summary
index 13c12dfc1c..2d9c953bfa 100644
--- a/cts/scheduler/summary/promoted-6.summary
+++ b/cts/scheduler/summary/promoted-6.summary
@@ -1,87 +1,87 @@
 Current cluster status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n03
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n02
       * child_DoFencing:2	(stonith:ssh):	 Started c001n03
       * child_DoFencing:3	(stonith:ssh):	 Started c001n01
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: child_DoFencing:1 monitor on c001n08
   * Resource action: child_DoFencing:1 monitor on c001n03
   * Resource action: child_DoFencing:1 monitor on c001n01
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n01
   * Resource action: child_DoFencing:3 monitor on c001n08
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n02
   * Resource action: ocf_msdummy:2   monitor on c001n08
   * Resource action: ocf_msdummy:2   monitor on c001n01
   * Resource action: ocf_msdummy:3   monitor on c001n03
   * Resource action: ocf_msdummy:3   monitor on c001n01
   * Resource action: ocf_msdummy:4   monitor on c001n08
   * Resource action: ocf_msdummy:4   monitor on c001n03
   * Resource action: ocf_msdummy:4   monitor on c001n01
   * Resource action: ocf_msdummy:5   monitor on c001n08
   * Resource action: ocf_msdummy:5   monitor on c001n02
   * Resource action: ocf_msdummy:5   monitor on c001n01
   * Resource action: ocf_msdummy:6   monitor on c001n08
   * Resource action: ocf_msdummy:6   monitor on c001n03
   * Resource action: ocf_msdummy:6   monitor on c001n02
   * Resource action: ocf_msdummy:7   monitor on c001n08
   * Resource action: ocf_msdummy:7   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n02
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n03
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n08
       * child_DoFencing:1	(stonith:ssh):	 Started c001n02
       * child_DoFencing:2	(stonith:ssh):	 Started c001n03
       * child_DoFencing:3	(stonith:ssh):	 Started c001n01
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n08
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01
diff --git a/cts/scheduler/summary/promoted-7.summary b/cts/scheduler/summary/promoted-7.summary
index 0602f95895..e43682c9d4 100644
--- a/cts/scheduler/summary/promoted-7.summary
+++ b/cts/scheduler/summary/promoted-7.summary
@@ -1,121 +1,121 @@
 Current cluster status:
   * Node List:
     * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n01 (UNCLEAN)
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n03
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n02
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01 (UNCLEAN)
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n01 (UNCLEAN)
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n02
       * child_DoFencing:3	(stonith:ssh):	 Started c001n08
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n01 (UNCLEAN)
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n01 (UNCLEAN)
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
 
 Transition Summary:
   * Fence (reboot) c001n01 'peer is no longer part of the cluster'
   * Move       DcIPaddr                      ( c001n01 -> c001n03 )
   * Move       ocf_192.168.100.181           ( c001n03 -> c001n02 )
   * Move       heartbeat_192.168.100.182     ( c001n03 -> c001n02 )
   * Move       ocf_192.168.100.183           ( c001n03 -> c001n02 )
   * Move       lsb_dummy                     ( c001n02 -> c001n08 )
   * Move       rsc_c001n01                   ( c001n01 -> c001n03 )
   * Stop       child_DoFencing:0             (            c001n01 )  due to node availability
   * Stop       ocf_msdummy:0                 (     Promoted c001n01 )  due to node availability
   * Stop       ocf_msdummy:4                 (      Unpromoted c001n01 )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   group-1_stop_0
   * Resource action: ocf_192.168.100.183 stop on c001n03
   * Resource action: lsb_dummy       stop on c001n02
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n02
   * Pseudo action:   DoFencing_stop_0
   * Resource action: ocf_msdummy:4   monitor on c001n08
   * Resource action: ocf_msdummy:4   monitor on c001n03
   * Resource action: ocf_msdummy:4   monitor on c001n02
   * Resource action: ocf_msdummy:5   monitor on c001n08
   * Resource action: ocf_msdummy:5   monitor on c001n02
   * Resource action: ocf_msdummy:6   monitor on c001n08
   * Resource action: ocf_msdummy:6   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n02
   * Pseudo action:   master_rsc_1_demote_0
   * Fencing c001n01 (reboot)
   * Pseudo action:   DcIPaddr_stop_0
   * Resource action: heartbeat_192.168.100.182 stop on c001n03
   * Resource action: lsb_dummy       start on c001n08
   * Pseudo action:   rsc_c001n01_stop_0
   * Pseudo action:   child_DoFencing:0_stop_0
   * Pseudo action:   DoFencing_stopped_0
   * Pseudo action:   ocf_msdummy:0_demote_0
   * Pseudo action:   master_rsc_1_demoted_0
   * Pseudo action:   master_rsc_1_stop_0
   * Resource action: DcIPaddr        start on c001n03
   * Resource action: ocf_192.168.100.181 stop on c001n03
   * Resource action: lsb_dummy       monitor=5000 on c001n08
   * Resource action: rsc_c001n01     start on c001n03
   * Pseudo action:   ocf_msdummy:0_stop_0
   * Pseudo action:   ocf_msdummy:4_stop_0
   * Pseudo action:   master_rsc_1_stopped_0
   * Resource action: DcIPaddr        monitor=5000 on c001n03
   * Pseudo action:   group-1_stopped_0
   * Pseudo action:   group-1_start_0
   * Resource action: ocf_192.168.100.181 start on c001n02
   * Resource action: heartbeat_192.168.100.182 start on c001n02
   * Resource action: ocf_192.168.100.183 start on c001n02
   * Resource action: rsc_c001n01     monitor=5000 on c001n03
   * Pseudo action:   group-1_running_0
   * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
   * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
   * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n08 ]
     * OFFLINE: [ c001n01 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n08
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Stopped
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n02
       * child_DoFencing:3	(stonith:ssh):	 Started c001n08
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-8.summary b/cts/scheduler/summary/promoted-8.summary
index 32417ff1ea..571eba6945 100644
--- a/cts/scheduler/summary/promoted-8.summary
+++ b/cts/scheduler/summary/promoted-8.summary
@@ -1,124 +1,124 @@
 Current cluster status:
   * Node List:
     * Node c001n01: UNCLEAN (offline)
     * Online: [ c001n02 c001n03 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n01 (UNCLEAN)
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n03
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n03
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n03
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n02
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n01 (UNCLEAN)
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started c001n01 (UNCLEAN)
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n02
       * child_DoFencing:3	(stonith:ssh):	 Started c001n08
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n01 (UNCLEAN)
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
 
 Transition Summary:
   * Fence (reboot) c001n01 'peer is no longer part of the cluster'
   * Move       DcIPaddr                      (              c001n01 -> c001n03 )
   * Move       ocf_192.168.100.181           (              c001n03 -> c001n02 )
   * Move       heartbeat_192.168.100.182     (              c001n03 -> c001n02 )
   * Move       ocf_192.168.100.183           (              c001n03 -> c001n02 )
   * Move       lsb_dummy                     (              c001n02 -> c001n08 )
   * Move       rsc_c001n01                   (              c001n01 -> c001n03 )
   * Stop       child_DoFencing:0             (                         c001n01 )  due to node availability
   * Move       ocf_msdummy:0                 ( Promoted c001n01 -> Unpromoted c001n03 )
 
 Executing Cluster Transition:
   * Pseudo action:   group-1_stop_0
   * Resource action: ocf_192.168.100.183 stop on c001n03
   * Resource action: lsb_dummy       stop on c001n02
   * Resource action: child_DoFencing:2 monitor on c001n08
   * Resource action: child_DoFencing:2 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n03
   * Resource action: child_DoFencing:3 monitor on c001n02
   * Pseudo action:   DoFencing_stop_0
   * Resource action: ocf_msdummy:4   monitor on c001n08
   * Resource action: ocf_msdummy:4   monitor on c001n03
   * Resource action: ocf_msdummy:4   monitor on c001n02
   * Resource action: ocf_msdummy:5   monitor on c001n08
   * Resource action: ocf_msdummy:5   monitor on c001n03
   * Resource action: ocf_msdummy:5   monitor on c001n02
   * Resource action: ocf_msdummy:6   monitor on c001n08
   * Resource action: ocf_msdummy:6   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n03
   * Resource action: ocf_msdummy:7   monitor on c001n02
   * Pseudo action:   master_rsc_1_demote_0
   * Fencing c001n01 (reboot)
   * Pseudo action:   DcIPaddr_stop_0
   * Resource action: heartbeat_192.168.100.182 stop on c001n03
   * Resource action: lsb_dummy       start on c001n08
   * Pseudo action:   rsc_c001n01_stop_0
   * Pseudo action:   child_DoFencing:0_stop_0
   * Pseudo action:   DoFencing_stopped_0
   * Pseudo action:   ocf_msdummy:0_demote_0
   * Pseudo action:   master_rsc_1_demoted_0
   * Pseudo action:   master_rsc_1_stop_0
   * Resource action: DcIPaddr        start on c001n03
   * Resource action: ocf_192.168.100.181 stop on c001n03
   * Resource action: lsb_dummy       monitor=5000 on c001n08
   * Resource action: rsc_c001n01     start on c001n03
   * Pseudo action:   ocf_msdummy:0_stop_0
   * Pseudo action:   master_rsc_1_stopped_0
   * Pseudo action:   master_rsc_1_start_0
   * Resource action: DcIPaddr        monitor=5000 on c001n03
   * Pseudo action:   group-1_stopped_0
   * Pseudo action:   group-1_start_0
   * Resource action: ocf_192.168.100.181 start on c001n02
   * Resource action: heartbeat_192.168.100.182 start on c001n02
   * Resource action: ocf_192.168.100.183 start on c001n02
   * Resource action: rsc_c001n01     monitor=5000 on c001n03
   * Resource action: ocf_msdummy:0   start on c001n03
   * Pseudo action:   master_rsc_1_running_0
   * Pseudo action:   group-1_running_0
   * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
   * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
   * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
   * Resource action: ocf_msdummy:0   monitor=5000 on c001n03
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n03 c001n08 ]
     * OFFLINE: [ c001n01 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n08
     * rsc_c001n01	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n03
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Stopped
       * child_DoFencing:1	(stonith:ssh):	 Started c001n03
       * child_DoFencing:2	(stonith:ssh):	 Started c001n02
       * child_DoFencing:3	(stonith:ssh):	 Started c001n08
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n03
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-9.summary b/cts/scheduler/summary/promoted-9.summary
index 2da56a62db..7dfdbbda99 100644
--- a/cts/scheduler/summary/promoted-9.summary
+++ b/cts/scheduler/summary/promoted-9.summary
@@ -1,100 +1,100 @@
 Current cluster status:
   * Node List:
     * Node sgi2: UNCLEAN (offline)
     * Node test02: UNCLEAN (offline)
     * Online: [ ibm1 va1 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_127.0.0.11	(ocf:heartbeat:IPaddr):	 Stopped
       * heartbeat_127.0.0.12	(ocf:heartbeat:IPaddr):	 Stopped
       * ocf_127.0.0.13	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	 Stopped
     * rsc_sgi2	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_ibm1	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_va1	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_test02	(ocf:heartbeat:IPaddr):	 Stopped
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started va1
       * child_DoFencing:1	(stonith:ssh):	 Started ibm1
       * child_DoFencing:2	(stonith:ssh):	 Stopped
       * child_DoFencing:3	(stonith:ssh):	 Stopped
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
 
 Transition Summary:
   * Start      DcIPaddr                 (                   va1 )  due to no quorum (blocked)
   * Start      ocf_127.0.0.11           (                   va1 )  due to no quorum (blocked)
   * Start      heartbeat_127.0.0.12     (                   va1 )  due to no quorum (blocked)
   * Start      ocf_127.0.0.13           (                   va1 )  due to no quorum (blocked)
   * Start      lsb_dummy                (                   va1 )  due to no quorum (blocked)
   * Start      rsc_sgi2                 (                   va1 )  due to no quorum (blocked)
   * Start      rsc_ibm1                 (                   va1 )  due to no quorum (blocked)
   * Start      rsc_va1                  (                   va1 )  due to no quorum (blocked)
   * Start      rsc_test02               (                   va1 )  due to no quorum (blocked)
   * Stop       child_DoFencing:1        (                  ibm1 )  due to node availability
   * Promote    ocf_msdummy:0            ( Stopped -> Promoted va1 )  blocked
   * Start      ocf_msdummy:1            (                   va1 )  due to no quorum (blocked)
 
 Executing Cluster Transition:
   * Resource action: child_DoFencing:1 monitor on va1
   * Resource action: child_DoFencing:2 monitor on va1
   * Resource action: child_DoFencing:2 monitor on ibm1
   * Resource action: child_DoFencing:3 monitor on va1
   * Resource action: child_DoFencing:3 monitor on ibm1
   * Pseudo action:   DoFencing_stop_0
   * Resource action: ocf_msdummy:2   monitor on va1
   * Resource action: ocf_msdummy:2   monitor on ibm1
   * Resource action: ocf_msdummy:3   monitor on va1
   * Resource action: ocf_msdummy:3   monitor on ibm1
   * Resource action: ocf_msdummy:4   monitor on va1
   * Resource action: ocf_msdummy:4   monitor on ibm1
   * Resource action: ocf_msdummy:5   monitor on va1
   * Resource action: ocf_msdummy:5   monitor on ibm1
   * Resource action: ocf_msdummy:6   monitor on va1
   * Resource action: ocf_msdummy:6   monitor on ibm1
   * Resource action: ocf_msdummy:7   monitor on va1
   * Resource action: ocf_msdummy:7   monitor on ibm1
   * Resource action: child_DoFencing:1 stop on ibm1
   * Pseudo action:   DoFencing_stopped_0
   * Cluster action:  do_shutdown on ibm1
 
 Revised Cluster Status:
   * Node List:
     * Node sgi2: UNCLEAN (offline)
     * Node test02: UNCLEAN (offline)
     * Online: [ ibm1 va1 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_127.0.0.11	(ocf:heartbeat:IPaddr):	 Stopped
       * heartbeat_127.0.0.12	(ocf:heartbeat:IPaddr):	 Stopped
       * ocf_127.0.0.13	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	 Stopped
     * rsc_sgi2	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_ibm1	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_va1	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_test02	(ocf:heartbeat:IPaddr):	 Stopped
     * Clone Set: DoFencing [child_DoFencing] (unique):
       * child_DoFencing:0	(stonith:ssh):	 Started va1
       * child_DoFencing:1	(stonith:ssh):	 Stopped
       * child_DoFencing:2	(stonith:ssh):	 Stopped
       * child_DoFencing:3	(stonith:ssh):	 Stopped
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	 Stopped
diff --git a/cts/scheduler/summary/promoted-asymmetrical-order.summary b/cts/scheduler/summary/promoted-asymmetrical-order.summary
index e10568e898..1e49b3084b 100644
--- a/cts/scheduler/summary/promoted-asymmetrical-order.summary
+++ b/cts/scheduler/summary/promoted-asymmetrical-order.summary
@@ -1,37 +1,37 @@
 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: ms1 [rsc1] (promotable) (disabled):
+    * Clone Set: ms1 [rsc1] (promotable, disabled):
       * Promoted: [ node1 ]
       * Unpromoted: [ node2 ]
     * Clone Set: ms2 [rsc2] (promotable):
       * Promoted: [ node2 ]
       * Unpromoted: [ node1 ]
 
 Transition Summary:
   * Stop       rsc1:0     ( Promoted node1 )  due to node availability
   * Stop       rsc1:1     (  Unpromoted node2 )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   ms1_demote_0
   * Resource action: rsc1:0          demote on node1
   * Pseudo action:   ms1_demoted_0
   * Pseudo action:   ms1_stop_0
   * Resource action: rsc1:0          stop on node1
   * Resource action: rsc1:1          stop on node2
   * Pseudo action:   ms1_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Clone Set: ms1 [rsc1] (promotable) (disabled):
+    * Clone Set: ms1 [rsc1] (promotable, disabled):
       * Stopped (disabled): [ node1 node2 ]
     * Clone Set: ms2 [rsc2] (promotable):
       * Promoted: [ node2 ]
       * Unpromoted: [ node1 ]
diff --git a/cts/scheduler/summary/promoted-failed-demote-2.summary b/cts/scheduler/summary/promoted-failed-demote-2.summary
index c8504e9e1d..453b5b7c9b 100644
--- a/cts/scheduler/summary/promoted-failed-demote-2.summary
+++ b/cts/scheduler/summary/promoted-failed-demote-2.summary
@@ -1,47 +1,47 @@
 Current cluster status:
   * Node List:
     * Online: [ dl380g5a dl380g5b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 FAILED dl380g5b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Stopped
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Unpromoted dl380g5a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Unpromoted dl380g5a
 
 Transition Summary:
   * Stop       stateful-1:0     (           Unpromoted dl380g5b )  due to node availability
   * Promote    stateful-1:1     ( Unpromoted -> Promoted dl380g5a )
   * Promote    stateful-2:1     ( Unpromoted -> Promoted dl380g5a )
 
 Executing Cluster Transition:
   * Resource action: stateful-1:1    cancel=20000 on dl380g5a
   * Resource action: stateful-2:1    cancel=20000 on dl380g5a
   * Pseudo action:   ms-sf_stop_0
   * Pseudo action:   group:0_stop_0
   * Resource action: stateful-1:0    stop on dl380g5b
   * Pseudo action:   group:0_stopped_0
   * Pseudo action:   ms-sf_stopped_0
   * Pseudo action:   ms-sf_promote_0
   * Pseudo action:   group:1_promote_0
   * Resource action: stateful-1:1    promote on dl380g5a
   * Resource action: stateful-2:1    promote on dl380g5a
   * Pseudo action:   group:1_promoted_0
   * Resource action: stateful-1:1    monitor=10000 on dl380g5a
   * Resource action: stateful-2:1    monitor=10000 on dl380g5a
   * Pseudo action:   ms-sf_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ dl380g5a dl380g5b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Stopped
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Stopped
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted dl380g5a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-failed-demote.summary b/cts/scheduler/summary/promoted-failed-demote.summary
index f071025528..732fba89c7 100644
--- a/cts/scheduler/summary/promoted-failed-demote.summary
+++ b/cts/scheduler/summary/promoted-failed-demote.summary
@@ -1,64 +1,64 @@
 Current cluster status:
   * Node List:
     * Online: [ dl380g5a dl380g5b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 FAILED dl380g5b
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Stopped
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Unpromoted dl380g5a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Unpromoted dl380g5a
 
 Transition Summary:
   * Stop       stateful-1:0     (           Unpromoted dl380g5b )  due to node availability
   * Promote    stateful-1:1     ( Unpromoted -> Promoted dl380g5a )
   * Promote    stateful-2:1     ( Unpromoted -> Promoted dl380g5a )
 
 Executing Cluster Transition:
   * Resource action: stateful-1:1    cancel=20000 on dl380g5a
   * Resource action: stateful-2:1    cancel=20000 on dl380g5a
   * Pseudo action:   ms-sf_pre_notify_stop_0
   * Resource action: stateful-1:0    notify on dl380g5b
   * Resource action: stateful-1:1    notify on dl380g5a
   * Resource action: stateful-2:1    notify on dl380g5a
   * Pseudo action:   ms-sf_confirmed-pre_notify_stop_0
   * Pseudo action:   ms-sf_stop_0
   * Pseudo action:   group:0_stop_0
   * Resource action: stateful-1:0    stop on dl380g5b
   * Pseudo action:   group:0_stopped_0
   * Pseudo action:   ms-sf_stopped_0
   * Pseudo action:   ms-sf_post_notify_stopped_0
   * Resource action: stateful-1:1    notify on dl380g5a
   * Resource action: stateful-2:1    notify on dl380g5a
   * Pseudo action:   ms-sf_confirmed-post_notify_stopped_0
   * Pseudo action:   ms-sf_pre_notify_promote_0
   * Resource action: stateful-1:1    notify on dl380g5a
   * Resource action: stateful-2:1    notify on dl380g5a
   * Pseudo action:   ms-sf_confirmed-pre_notify_promote_0
   * Pseudo action:   ms-sf_promote_0
   * Pseudo action:   group:1_promote_0
   * Resource action: stateful-1:1    promote on dl380g5a
   * Resource action: stateful-2:1    promote on dl380g5a
   * Pseudo action:   group:1_promoted_0
   * Pseudo action:   ms-sf_promoted_0
   * Pseudo action:   ms-sf_post_notify_promoted_0
   * Resource action: stateful-1:1    notify on dl380g5a
   * Resource action: stateful-2:1    notify on dl380g5a
   * Pseudo action:   ms-sf_confirmed-post_notify_promoted_0
   * Resource action: stateful-1:1    monitor=10000 on dl380g5a
   * Resource action: stateful-2:1    monitor=10000 on dl380g5a
 
 Revised Cluster Status:
   * Node List:
     * Online: [ dl380g5a dl380g5b ]
 
   * Full List of Resources:
-    * Clone Set: ms-sf [group] (promotable) (unique):
+    * Clone Set: ms-sf [group] (promotable, unique):
       * Resource Group: group:0:
         * stateful-1:0	(ocf:heartbeat:Stateful):	 Stopped
         * stateful-2:0	(ocf:heartbeat:Stateful):	 Stopped
       * Resource Group: group:1:
         * stateful-1:1	(ocf:heartbeat:Stateful):	 Promoted dl380g5a
         * stateful-2:1	(ocf:heartbeat:Stateful):	 Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-group.summary b/cts/scheduler/summary/promoted-group.summary
index f06047c34f..594d1b50d4 100644
--- a/cts/scheduler/summary/promoted-group.summary
+++ b/cts/scheduler/summary/promoted-group.summary
@@ -1,37 +1,37 @@
 Current cluster status:
   * Node List:
     * Online: [ rh44-1 rh44-2 ]
 
   * Full List of Resources:
     * Resource Group: test:
       * resource_1	(ocf:heartbeat:IPaddr):	 Started rh44-1
-    * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique):
+    * Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
       * Resource Group: grp_ms_sf:0:
         * master_slave_Stateful:0	(ocf:heartbeat:Stateful):	 Unpromoted rh44-2
       * Resource Group: grp_ms_sf:1:
         * master_slave_Stateful:1	(ocf:heartbeat:Stateful):	 Unpromoted rh44-1
 
 Transition Summary:
   * Promote    master_slave_Stateful:1     ( Unpromoted -> Promoted rh44-1 )
 
 Executing Cluster Transition:
   * Resource action: master_slave_Stateful:1 cancel=5000 on rh44-1
   * Pseudo action:   ms-sf_promote_0
   * Pseudo action:   grp_ms_sf:1_promote_0
   * Resource action: master_slave_Stateful:1 promote on rh44-1
   * Pseudo action:   grp_ms_sf:1_promoted_0
   * Resource action: master_slave_Stateful:1 monitor=6000 on rh44-1
   * Pseudo action:   ms-sf_promoted_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ rh44-1 rh44-2 ]
 
   * Full List of Resources:
     * Resource Group: test:
       * resource_1	(ocf:heartbeat:IPaddr):	 Started rh44-1
-    * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique):
+    * Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
       * Resource Group: grp_ms_sf:0:
         * master_slave_Stateful:0	(ocf:heartbeat:Stateful):	 Unpromoted rh44-2
       * Resource Group: grp_ms_sf:1:
         * master_slave_Stateful:1	(ocf:heartbeat:Stateful):	 Promoted rh44-1
diff --git a/cts/scheduler/summary/promoted-reattach.summary b/cts/scheduler/summary/promoted-reattach.summary
index cf089d9324..8f07251f2e 100644
--- a/cts/scheduler/summary/promoted-reattach.summary
+++ b/cts/scheduler/summary/promoted-reattach.summary
@@ -1,34 +1,34 @@
 Current cluster status:
   * Node List:
     * Online: [ dktest1 dktest2 ]
 
   * Full List of Resources:
-    * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged):
+    * Clone Set: ms-drbd1 [drbd1] (promotable, unmanaged):
       * drbd1	(ocf:heartbeat:drbd):	 Promoted dktest1 (unmanaged)
       * drbd1	(ocf:heartbeat:drbd):	 Unpromoted dktest2 (unmanaged)
     * Resource Group: apache (unmanaged):
       * apache-vip	(ocf:heartbeat:IPaddr2):	 Started dktest1 (unmanaged)
       * mount	(ocf:heartbeat:Filesystem):	 Started dktest1 (unmanaged)
       * webserver	(ocf:heartbeat:apache):	 Started dktest1 (unmanaged)
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: drbd1:0         monitor=10000 on dktest1
   * Resource action: drbd1:0         monitor=11000 on dktest2
   * Resource action: apache-vip      monitor=60000 on dktest1
   * Resource action: mount           monitor=10000 on dktest1
   * Resource action: webserver       monitor=30000 on dktest1
 
 Revised Cluster Status:
   * Node List:
     * Online: [ dktest1 dktest2 ]
 
   * Full List of Resources:
-    * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged):
+    * Clone Set: ms-drbd1 [drbd1] (promotable, unmanaged):
       * drbd1	(ocf:heartbeat:drbd):	 Promoted dktest1 (unmanaged)
       * drbd1	(ocf:heartbeat:drbd):	 Unpromoted dktest2 (unmanaged)
     * Resource Group: apache (unmanaged):
       * apache-vip	(ocf:heartbeat:IPaddr2):	 Started dktest1 (unmanaged)
       * mount	(ocf:heartbeat:Filesystem):	 Started dktest1 (unmanaged)
       * webserver	(ocf:heartbeat:apache):	 Started dktest1 (unmanaged)
diff --git a/cts/scheduler/summary/promoted-unmanaged-monitor.summary b/cts/scheduler/summary/promoted-unmanaged-monitor.summary
index 2b96429fad..3c5b39aa17 100644
--- a/cts/scheduler/summary/promoted-unmanaged-monitor.summary
+++ b/cts/scheduler/summary/promoted-unmanaged-monitor.summary
@@ -1,69 +1,69 @@
 Current cluster status:
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
   * Full List of Resources:
     * Clone Set: Fencing [FencingChild] (unmanaged):
       * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
     * Resource Group: group-1 (unmanaged):
       * r192.168.122.112	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
       * r192.168.122.113	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
       * r192.168.122.114	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-1	(ocf:heartbeat:IPaddr):	 Started pcmk-1 (unmanaged)
     * rsc_pcmk-2	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-3	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-4	(ocf:heartbeat:IPaddr):	 Started pcmk-4 (unmanaged)
     * lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	 Started pcmk-3 (unmanaged)
     * migrator	(ocf:pacemaker:Dummy):	 Started pcmk-4 (unmanaged)
     * Clone Set: Connectivity [ping-1] (unmanaged):
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-2 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-3 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-4 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-1 (unmanaged)
-    * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+    * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-2 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Promoted pcmk-3 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-4 (unmanaged)
       * Stopped: [ pcmk-1 ]
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: lsb-dummy       monitor=5000 on pcmk-3
   * Resource action: migrator        monitor=10000 on pcmk-4
   * Resource action: ping-1:0        monitor=60000 on pcmk-2
   * Resource action: ping-1:0        monitor=60000 on pcmk-3
   * Resource action: ping-1:0        monitor=60000 on pcmk-4
   * Resource action: ping-1:0        monitor=60000 on pcmk-1
   * Resource action: stateful-1:0    monitor=15000 on pcmk-2
   * Resource action: stateful-1:0    monitor on pcmk-1
   * Resource action: stateful-1:0    monitor=16000 on pcmk-3
   * Resource action: stateful-1:0    monitor=15000 on pcmk-4
 
 Revised Cluster Status:
   * Node List:
     * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
   * Full List of Resources:
     * Clone Set: Fencing [FencingChild] (unmanaged):
       * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
     * Resource Group: group-1 (unmanaged):
       * r192.168.122.112	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
       * r192.168.122.113	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
       * r192.168.122.114	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-1	(ocf:heartbeat:IPaddr):	 Started pcmk-1 (unmanaged)
     * rsc_pcmk-2	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-3	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-4	(ocf:heartbeat:IPaddr):	 Started pcmk-4 (unmanaged)
     * lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	 Started pcmk-3 (unmanaged)
     * migrator	(ocf:pacemaker:Dummy):	 Started pcmk-4 (unmanaged)
     * Clone Set: Connectivity [ping-1] (unmanaged):
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-2 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-3 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-4 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-1 (unmanaged)
-    * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+    * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-2 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Promoted pcmk-3 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-4 (unmanaged)
       * Stopped: [ pcmk-1 ]
diff --git a/cts/scheduler/summary/rec-node-13.summary b/cts/scheduler/summary/rec-node-13.summary
index 68210542c3..72c8e42736 100644
--- a/cts/scheduler/summary/rec-node-13.summary
+++ b/cts/scheduler/summary/rec-node-13.summary
@@ -1,80 +1,80 @@
 Current cluster status:
   * Node List:
     * Node c001n04: UNCLEAN (online)
     * Online: [ c001n02 c001n06 c001n07 ]
     * OFFLINE: [ c001n03 c001n05 ]
 
   * Full List of Resources:
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n02 c001n06 c001n07 ]
       * Stopped: [ c001n03 c001n04 c001n05 ]
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n06
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n02
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 FAILED c001n04
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:9	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:10	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:11	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
 
 Transition Summary:
   * Fence (reboot) c001n04 'ocf_msdummy:6 failed there'
   * Stop       ocf_msdummy:6     ( Unpromoted c001n04 )  due to node availability
 
 Executing Cluster Transition:
   * Fencing c001n04 (reboot)
   * Pseudo action:   master_rsc_1_stop_0
   * Pseudo action:   ocf_msdummy:6_stop_0
   * Pseudo action:   master_rsc_1_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n06 c001n07 ]
     * OFFLINE: [ c001n03 c001n04 c001n05 ]
 
   * Full List of Resources:
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n02 c001n06 c001n07 ]
       * Stopped: [ c001n03 c001n04 c001n05 ]
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n06
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n02
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:9	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:10	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:11	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
diff --git a/cts/scheduler/summary/rsc-maintenance.summary b/cts/scheduler/summary/rsc-maintenance.summary
index fed1d61e19..0b9d57ed2a 100644
--- a/cts/scheduler/summary/rsc-maintenance.summary
+++ b/cts/scheduler/summary/rsc-maintenance.summary
@@ -1,31 +1,31 @@
 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Resource Group: group1 (unmanaged) (disabled):
+    * Resource Group: group1 (unmanaged, disabled):
       * rsc1	(ocf:pacemaker:Dummy):	 Started node1 (disabled, unmanaged)
       * rsc2	(ocf:pacemaker:Dummy):	 Started node1 (disabled, unmanaged)
     * Resource Group: group2:
       * rsc3	(ocf:pacemaker:Dummy):	 Started node2
       * rsc4	(ocf:pacemaker:Dummy):	 Started node2
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Resource action: rsc1            cancel=10000 on node1
   * Resource action: rsc2            cancel=10000 on node1
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 ]
 
   * Full List of Resources:
-    * Resource Group: group1 (unmanaged) (disabled):
+    * Resource Group: group1 (unmanaged, disabled):
       * rsc1	(ocf:pacemaker:Dummy):	 Started node1 (disabled, unmanaged)
       * rsc2	(ocf:pacemaker:Dummy):	 Started node1 (disabled, unmanaged)
     * Resource Group: group2:
       * rsc3	(ocf:pacemaker:Dummy):	 Started node2
       * rsc4	(ocf:pacemaker:Dummy):	 Started node2
diff --git a/cts/scheduler/summary/stonith-0.summary b/cts/scheduler/summary/stonith-0.summary
index 5b829bf06d..f9745bd642 100644
--- a/cts/scheduler/summary/stonith-0.summary
+++ b/cts/scheduler/summary/stonith-0.summary
@@ -1,111 +1,111 @@
 Current cluster status:
   * Node List:
     * Node c001n03: UNCLEAN (online)
     * Node c001n05: UNCLEAN (online)
     * Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started [ c001n03 c001n05 ]
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n03
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 FAILED [ c001n03 c001n05 ]
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n04
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n05
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n03
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
       * Stopped: [ c001n03 c001n05 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n02
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:9	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:10	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n04
       * ocf_msdummy:11	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n04
       * ocf_msdummy:12	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:13	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
 
 Transition Summary:
   * Fence (reboot) c001n05 'ocf_192.168.100.183 failed there'
   * Fence (reboot) c001n03 'ocf_192.168.100.183 failed there'
   * Move       ocf_192.168.100.181           ( c001n03 -> c001n02 )
   * Move       heartbeat_192.168.100.182     ( c001n03 -> c001n02 )
   * Recover    ocf_192.168.100.183           ( c001n03 -> c001n02 )
   * Move       rsc_c001n05                   ( c001n05 -> c001n07 )
   * Move       rsc_c001n07                   ( c001n03 -> c001n07 )
 
 Executing Cluster Transition:
   * Resource action: child_DoFencing:4 monitor=20000 on c001n08
   * Fencing c001n05 (reboot)
   * Fencing c001n03 (reboot)
   * Pseudo action:   group-1_stop_0
   * Pseudo action:   ocf_192.168.100.183_stop_0
   * Pseudo action:   ocf_192.168.100.183_stop_0
   * Pseudo action:   rsc_c001n05_stop_0
   * Pseudo action:   rsc_c001n07_stop_0
   * Pseudo action:   heartbeat_192.168.100.182_stop_0
   * Resource action: rsc_c001n05     start on c001n07
   * Resource action: rsc_c001n07     start on c001n07
   * Pseudo action:   ocf_192.168.100.181_stop_0
   * Pseudo action:   ocf_192.168.100.181_stop_0
   * Resource action: rsc_c001n05     monitor=5000 on c001n07
   * Resource action: rsc_c001n07     monitor=5000 on c001n07
   * Pseudo action:   group-1_stopped_0
   * Pseudo action:   group-1_start_0
   * Resource action: ocf_192.168.100.181 start on c001n02
   * Resource action: heartbeat_192.168.100.182 start on c001n02
   * Resource action: ocf_192.168.100.183 start on c001n02
   * Pseudo action:   group-1_running_0
   * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
   * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
   * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 
 Revised Cluster Status:
   * Node List:
     * Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
     * OFFLINE: [ c001n03 c001n05 ]
 
   * Full List of Resources:
     * DcIPaddr	(ocf:heartbeat:IPaddr):	 Stopped
     * Resource Group: group-1:
       * ocf_192.168.100.181	(ocf:heartbeat:IPaddr):	 Started c001n02
       * heartbeat_192.168.100.182	(ocf:heartbeat:IPaddr):	 Started c001n02
       * ocf_192.168.100.183	(ocf:heartbeat:IPaddr):	 Started c001n02
     * lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	 Started c001n04
     * rsc_c001n03	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n02	(ocf:heartbeat:IPaddr):	 Started c001n02
     * rsc_c001n04	(ocf:heartbeat:IPaddr):	 Started c001n04
     * rsc_c001n05	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n06	(ocf:heartbeat:IPaddr):	 Started c001n06
     * rsc_c001n07	(ocf:heartbeat:IPaddr):	 Started c001n07
     * rsc_c001n08	(ocf:heartbeat:IPaddr):	 Started c001n08
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
       * Stopped: [ c001n03 c001n05 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Promoted c001n02
       * ocf_msdummy:1	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n02
       * ocf_msdummy:2	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:3	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n07
       * ocf_msdummy:4	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:5	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n08
       * ocf_msdummy:6	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:9	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Stopped
       * ocf_msdummy:10	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n04
       * ocf_msdummy:11	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n04
       * ocf_msdummy:12	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
       * ocf_msdummy:13	(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	 Unpromoted c001n06
diff --git a/cts/scheduler/summary/stonith-1.summary b/cts/scheduler/summary/stonith-1.summary
index 31d2f57ce8..29b979cacc 100644
--- a/cts/scheduler/summary/stonith-1.summary
+++ b/cts/scheduler/summary/stonith-1.summary
@@ -1,113 +1,113 @@
 Current cluster status:
   * Node List:
     * Node sles-3: UNCLEAN (offline)
     * Online: [ sles-1 sles-2 sles-4 ]
 
   * Full List of Resources:
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Stopped
     * lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	 Started sles-2
     * migrator	(ocf:heartbeat:Dummy):	 Started sles-3 (UNCLEAN)
     * rsc_sles-1	(ocf:heartbeat:IPaddr):	 Started sles-1
     * rsc_sles-2	(ocf:heartbeat:IPaddr):	 Started sles-2
     * rsc_sles-3	(ocf:heartbeat:IPaddr):	 Started sles-3 (UNCLEAN)
     * rsc_sles-4	(ocf:heartbeat:IPaddr):	 Started sles-4
     * Clone Set: DoFencing [child_DoFencing]:
       * child_DoFencing	(stonith:external/vmware):	 Started sles-3 (UNCLEAN)
       * Started: [ sles-1 sles-2 ]
       * Stopped: [ sles-4 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Unpromoted sles-3 (UNCLEAN)
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Unpromoted sles-3 (UNCLEAN)
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
 
 Transition Summary:
   * Fence (reboot) sles-3 'peer is no longer part of the cluster'
   * Start      r192.168.100.183     (                 sles-1 )
   * Move       migrator             (       sles-3 -> sles-4 )
   * Move       rsc_sles-3           (       sles-3 -> sles-4 )
   * Move       child_DoFencing:2    (       sles-3 -> sles-4 )
   * Start      ocf_msdummy:0        (                 sles-4 )
   * Start      ocf_msdummy:1        (                 sles-1 )
   * Move       ocf_msdummy:2        ( sles-3 -> sles-2 Unpromoted )
   * Start      ocf_msdummy:3        (                 sles-4 )
   * Start      ocf_msdummy:4        (                 sles-1 )
   * Move       ocf_msdummy:5        ( sles-3 -> sles-2 Unpromoted )
 
 Executing Cluster Transition:
   * Pseudo action:   group-1_start_0
   * Resource action: r192.168.100.182 monitor=5000 on sles-1
   * Resource action: lsb_dummy       monitor=5000 on sles-2
   * Resource action: rsc_sles-2      monitor=5000 on sles-2
   * Resource action: rsc_sles-4      monitor=5000 on sles-4
   * Pseudo action:   DoFencing_stop_0
   * Fencing sles-3 (reboot)
   * Resource action: r192.168.100.183 start on sles-1
   * Pseudo action:   migrator_stop_0
   * Pseudo action:   rsc_sles-3_stop_0
   * Pseudo action:   child_DoFencing:2_stop_0
   * Pseudo action:   DoFencing_stopped_0
   * Pseudo action:   DoFencing_start_0
   * Pseudo action:   master_rsc_1_stop_0
   * Pseudo action:   group-1_running_0
   * Resource action: r192.168.100.183 monitor=5000 on sles-1
   * Resource action: migrator        start on sles-4
   * Resource action: rsc_sles-3      start on sles-4
   * Resource action: child_DoFencing:2 start on sles-4
   * Pseudo action:   DoFencing_running_0
   * Pseudo action:   ocf_msdummy:2_stop_0
   * Pseudo action:   ocf_msdummy:5_stop_0
   * Pseudo action:   master_rsc_1_stopped_0
   * Pseudo action:   master_rsc_1_start_0
   * Resource action: migrator        monitor=10000 on sles-4
   * Resource action: rsc_sles-3      monitor=5000 on sles-4
   * Resource action: child_DoFencing:2 monitor=60000 on sles-4
   * Resource action: ocf_msdummy:0   start on sles-4
   * Resource action: ocf_msdummy:1   start on sles-1
   * Resource action: ocf_msdummy:2   start on sles-2
   * Resource action: ocf_msdummy:3   start on sles-4
   * Resource action: ocf_msdummy:4   start on sles-1
   * Resource action: ocf_msdummy:5   start on sles-2
   * Pseudo action:   master_rsc_1_running_0
   * Resource action: ocf_msdummy:0   monitor=5000 on sles-4
   * Resource action: ocf_msdummy:1   monitor=5000 on sles-1
   * Resource action: ocf_msdummy:2   monitor=5000 on sles-2
   * Resource action: ocf_msdummy:3   monitor=5000 on sles-4
   * Resource action: ocf_msdummy:4   monitor=5000 on sles-1
   * Resource action: ocf_msdummy:5   monitor=5000 on sles-2
 
 Revised Cluster Status:
   * Node List:
     * Online: [ sles-1 sles-2 sles-4 ]
     * OFFLINE: [ sles-3 ]
 
   * Full List of Resources:
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Started sles-1
     * lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	 Started sles-2
     * migrator	(ocf:heartbeat:Dummy):	 Started sles-4
     * rsc_sles-1	(ocf:heartbeat:IPaddr):	 Started sles-1
     * rsc_sles-2	(ocf:heartbeat:IPaddr):	 Started sles-2
     * rsc_sles-3	(ocf:heartbeat:IPaddr):	 Started sles-4
     * rsc_sles-4	(ocf:heartbeat:IPaddr):	 Started sles-4
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ sles-1 sles-2 sles-4 ]
       * Stopped: [ sles-3 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
diff --git a/cts/scheduler/summary/stonith-2.summary b/cts/scheduler/summary/stonith-2.summary
index 9fd7c65e59..c6f657193b 100644
--- a/cts/scheduler/summary/stonith-2.summary
+++ b/cts/scheduler/summary/stonith-2.summary
@@ -1,78 +1,78 @@
 Current cluster status:
   * Node List:
     * Node sles-5: UNCLEAN (offline)
     * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
 
   * Full List of Resources:
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Started sles-1
     * lsb_dummy	(lsb:/usr/share/heartbeat/cts/LSBDummy):	 Started sles-2
     * migrator	(ocf:heartbeat:Dummy):	 Started sles-3
     * rsc_sles-1	(ocf:heartbeat:IPaddr):	 Started sles-1
     * rsc_sles-2	(ocf:heartbeat:IPaddr):	 Started sles-2
     * rsc_sles-3	(ocf:heartbeat:IPaddr):	 Started sles-3
     * rsc_sles-4	(ocf:heartbeat:IPaddr):	 Started sles-4
     * rsc_sles-5	(ocf:heartbeat:IPaddr):	 Stopped
     * rsc_sles-6	(ocf:heartbeat:IPaddr):	 Started sles-6
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
       * Stopped: [ sles-5 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Unpromoted sles-3
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Unpromoted sles-6
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Unpromoted sles-6
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Unpromoted sles-3
 
 Transition Summary:
   * Fence (reboot) sles-5 'peer is no longer part of the cluster'
   * Start      rsc_sles-5     ( sles-6 )
 
 Executing Cluster Transition:
   * Fencing sles-5 (reboot)
   * Resource action: rsc_sles-5      start on sles-6
   * Resource action: rsc_sles-5      monitor=5000 on sles-6
 
 Revised Cluster Status:
   * Node List:
     * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
     * OFFLINE: [ sles-5 ]
 
   * Full List of Resources:
     * Resource Group: group-1:
       * r192.168.100.181	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.182	(ocf:heartbeat:IPaddr):	 Started sles-1
       * r192.168.100.183	(ocf:heartbeat:IPaddr):	 Started sles-1
     * lsb_dummy	(lsb:/usr/share/heartbeat/cts/LSBDummy):	 Started sles-2
     * migrator	(ocf:heartbeat:Dummy):	 Started sles-3
     * rsc_sles-1	(ocf:heartbeat:IPaddr):	 Started sles-1
     * rsc_sles-2	(ocf:heartbeat:IPaddr):	 Started sles-2
     * rsc_sles-3	(ocf:heartbeat:IPaddr):	 Started sles-3
     * rsc_sles-4	(ocf:heartbeat:IPaddr):	 Started sles-4
     * rsc_sles-5	(ocf:heartbeat:IPaddr):	 Started sles-6
     * rsc_sles-6	(ocf:heartbeat:IPaddr):	 Started sles-6
     * Clone Set: DoFencing [child_DoFencing]:
       * Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
       * Stopped: [ sles-5 ]
-    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+    * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
       * ocf_msdummy:0	(ocf:heartbeat:Stateful):	 Unpromoted sles-3
       * ocf_msdummy:1	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:2	(ocf:heartbeat:Stateful):	 Unpromoted sles-4
       * ocf_msdummy:3	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:4	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:5	(ocf:heartbeat:Stateful):	 Unpromoted sles-1
       * ocf_msdummy:6	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:7	(ocf:heartbeat:Stateful):	 Stopped
       * ocf_msdummy:8	(ocf:heartbeat:Stateful):	 Unpromoted sles-6
       * ocf_msdummy:9	(ocf:heartbeat:Stateful):	 Unpromoted sles-6
       * ocf_msdummy:10	(ocf:heartbeat:Stateful):	 Unpromoted sles-2
       * ocf_msdummy:11	(ocf:heartbeat:Stateful):	 Unpromoted sles-3
diff --git a/cts/scheduler/summary/unmanaged-promoted.summary b/cts/scheduler/summary/unmanaged-promoted.summary
index bdaac99618..33c0a4127f 100644
--- a/cts/scheduler/summary/unmanaged-promoted.summary
+++ b/cts/scheduler/summary/unmanaged-promoted.summary
@@ -1,63 +1,63 @@
 Current cluster status:
   * Node List:
     * Online: [ pcmk-1 pcmk-2 ]
     * OFFLINE: [ pcmk-3 pcmk-4 ]
 
   * Full List of Resources:
     * Clone Set: Fencing [FencingChild] (unmanaged):
       * FencingChild	(stonith:fence_xvm):	 Started pcmk-2 (unmanaged)
       * FencingChild	(stonith:fence_xvm):	 Started pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
     * Resource Group: group-1 (unmanaged):
       * r192.168.122.126	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
       * r192.168.122.127	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
       * r192.168.122.128	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-1	(ocf:heartbeat:IPaddr):	 Started pcmk-1 (unmanaged)
     * rsc_pcmk-2	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-3	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-4	(ocf:heartbeat:IPaddr):	 Started pcmk-4 (unmanaged)
     * lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	 Started pcmk-2 (unmanaged)
     * migrator	(ocf:pacemaker:Dummy):	 Started pcmk-4 (unmanaged)
     * Clone Set: Connectivity [ping-1] (unmanaged):
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-2 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
-    * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+    * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
       * stateful-1	(ocf:pacemaker:Stateful):	 Promoted pcmk-2 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
 
 Transition Summary:
 
 Executing Cluster Transition:
   * Cluster action:  do_shutdown on pcmk-2
   * Cluster action:  do_shutdown on pcmk-1
 
 Revised Cluster Status:
   * Node List:
     * Online: [ pcmk-1 pcmk-2 ]
     * OFFLINE: [ pcmk-3 pcmk-4 ]
 
   * Full List of Resources:
     * Clone Set: Fencing [FencingChild] (unmanaged):
       * FencingChild	(stonith:fence_xvm):	 Started pcmk-2 (unmanaged)
       * FencingChild	(stonith:fence_xvm):	 Started pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
     * Resource Group: group-1 (unmanaged):
       * r192.168.122.126	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
       * r192.168.122.127	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
       * r192.168.122.128	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-1	(ocf:heartbeat:IPaddr):	 Started pcmk-1 (unmanaged)
     * rsc_pcmk-2	(ocf:heartbeat:IPaddr):	 Started pcmk-2 (unmanaged)
     * rsc_pcmk-3	(ocf:heartbeat:IPaddr):	 Started pcmk-3 (unmanaged)
     * rsc_pcmk-4	(ocf:heartbeat:IPaddr):	 Started pcmk-4 (unmanaged)
     * lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	 Started pcmk-2 (unmanaged)
     * migrator	(ocf:pacemaker:Dummy):	 Started pcmk-4 (unmanaged)
     * Clone Set: Connectivity [ping-1] (unmanaged):
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-2 (unmanaged)
       * ping-1	(ocf:pacemaker:ping):	 Started pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
-    * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+    * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
       * stateful-1	(ocf:pacemaker:Stateful):	 Promoted pcmk-2 (unmanaged)
       * stateful-1	(ocf:pacemaker:Stateful):	 Unpromoted pcmk-1 (unmanaged)
       * Stopped: [ pcmk-3 pcmk-4 ]
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index b8cfc497b3..7f4d0744ba 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1017 +1,1039 @@
 /*
  * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/xml_internal.h>
 
 #define VARIANT_CLONE 1
 #include "./variant.h"
 
 #ifdef PCMK__COMPAT_2_0
 #define PROMOTED_INSTANCES   RSC_ROLE_PROMOTED_LEGACY_S "s"
 #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
 #else
 #define PROMOTED_INSTANCES   RSC_ROLE_PROMOTED_S
 #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
 #endif
 
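+/*!
+ * \internal
+ * \brief Print a clone header, with any attributes in parentheses
+ *
+ * The header is printed at most once per list; \p rc tracks whether it has
+ * been printed already.
+ *
+ * \param[in,out] out         Output object
+ * \param[in,out] rc          Where to store output return code
+ * \param[in]     rsc         Clone being printed
+ * \param[in]     clone_data  Variant data for \p rsc
+ */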
 static void
 clone_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, clone_variant_data_t *clone_data)
 {
-    PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]%s%s%s%s",
+    char *attrs = NULL;
+    size_t len = 0;
+
+    if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+        pcmk__add_separated_word(&attrs, &len, "promotable", ", ");
+    }
+
+    if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+        pcmk__add_separated_word(&attrs, &len, "unique", ", ");
+    }
+
+    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+        pcmk__add_separated_word(&attrs, &len, "unmanaged", ", ");
+    }
+
+    if (pe__resource_is_disabled(rsc)) {
+        pcmk__add_separated_word(&attrs, &len, "disabled", ", ");
+    }
+
+    if (attrs) {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)",
+                                 rsc->id, ID(clone_data->xml_obj_child),
+                                 attrs);
-                             rsc->id, ID(clone_data->xml_obj_child),
-                             pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
-                             pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
-                             pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
-                             pe__resource_is_disabled(rsc) ? " (disabled)" : "");
+        free(attrs);
+    } else {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]",
+                                 rsc->id, ID(clone_data->xml_obj_child));
+    }
 }
 
 void
 pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
                pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
 
         pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
                 "such as %s can be used only as anonymous clones",
                 rsc->id, standard, rid);
 
         clone_data->clone_node_max = 1;
         clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                        g_list_length(data_set->nodes));
     }
 }
 
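+/*!
+ * \internal
+ * \brief Look up a clone instance by its instance number
+ *
+ * \param[in] rsc       Clone resource to search
+ * \param[in] sub_id    Instance number to look for, as a string
+ * \param[in] data_set  Cluster working set (unused)
+ *
+ * \return Matching child resource if found, otherwise NULL
+ */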
 pe_resource_t *
 find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
 {
     char *child_id = NULL;
     pe_resource_t *child = NULL;
     const char *child_base = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     child_base = ID(clone_data->xml_obj_child);
     child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
     child = pe_find_resource(rsc->children, child_id);
 
     free(child_id);
     return child;
 }
 
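+/*!
+ * \internal
+ * \brief Create a new instance of a clone as a child resource
+ *
+ * Instance numbers are allocated in ascending order starting with 0. If all
+ * configured instances already exist, the new instance is marked as an
+ * orphan.
+ *
+ * \param[in] rsc       Clone to create an instance of
+ * \param[in] data_set  Cluster working set
+ *
+ * \return New instance on success, otherwise NULL
+ */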
 pe_resource_t *
 pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     gboolean as_orphan = FALSE;
     char *inc_num = NULL;
     char *inc_max = NULL;
     pe_resource_t *child_rsc = NULL;
     xmlNode *child_copy = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
-    CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
+    CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);
 
     if (clone_data->total_clones >= clone_data->clone_max) {
         // If we've already used all available instances, this is an orphan
         as_orphan = TRUE;
     }
 
     // Allocate instance numbers in numerical order (starting at 0)
     inc_num = pcmk__itoa(clone_data->total_clones);
     inc_max = pcmk__itoa(clone_data->clone_max);
 
     child_copy = copy_xml(clone_data->xml_obj_child);
 
     crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
 
     if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
         pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
         child_rsc = NULL;
         goto bail;
     }
 /*  child_rsc->globally_unique = rsc->globally_unique; */
 
     CRM_ASSERT(child_rsc);
     clone_data->total_clones += 1;
     pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
     rsc->children = g_list_append(rsc->children, child_rsc);
     if (as_orphan) {
         pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
     }
 
     add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
     pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
 
   bail:
     free(inc_num);
     free(inc_max);
 
     return child_rsc;
 }
 
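+/*!
+ * \internal
+ * \brief Unpack a clone's XML configuration, creating one child resource
+ *        per configured instance
+ *
+ * \param[in] rsc       Clone being unpacked
+ * \param[in] data_set  Cluster working set
+ *
+ * \return TRUE on success, otherwise FALSE
+ */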
 gboolean
 clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     int lpc = 0;
     xmlNode *a_child = NULL;
     xmlNode *xml_obj = rsc->xml;
     clone_variant_data_t *clone_data = NULL;
 
     const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
     const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     clone_data = calloc(1, sizeof(clone_variant_data_t));
     rsc->variant_opaque = clone_data;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         const char *promoted_max = NULL;
         const char *promoted_node_max = NULL;
 
         promoted_max = g_hash_table_lookup(rsc->meta,
                                            XML_RSC_ATTR_PROMOTED_MAX);
         if (promoted_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_max = g_hash_table_lookup(rsc->meta,
                                                PCMK_XE_PROMOTED_MAX_LEGACY);
         }
 
         promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                 XML_RSC_ATTR_PROMOTED_NODEMAX);
         if (promoted_node_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                     PCMK_XE_PROMOTED_NODE_MAX_LEGACY);
         }
 
         // Use 1 as default but 0 for minimum and invalid
         if (promoted_max == NULL) {
             clone_data->promoted_max = 1;
         } else {
             pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
         }
 
         // Use 1 as default but 0 for minimum and invalid
         if (promoted_node_max == NULL) {
             clone_data->promoted_node_max = 1;
         } else {
             pcmk__scan_min_int(promoted_node_max,
                                &(clone_data->promoted_node_max), 0);
         }
     }
 
     // Implied by calloc()
     /* clone_data->xml_obj_child = NULL; */
 
     // Use 1 as default but 0 for minimum and invalid
     if (max_clones_node == NULL) {
         clone_data->clone_node_max = 1;
     } else {
         pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
     }
 
     /* Use number of nodes (but always at least 1, which is handy for crm_verify
      * for a CIB without nodes) as default, but 0 for minimum and invalid
      */
     if (max_clones == NULL) {
         clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
     } else {
         pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
     }
 
     clone_data->ordered = crm_is_true(ordered);
 
     if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
-        pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
-                         "because anonymous clones support only one instance "
-                         "per node", rsc->id);
+        pcmk__config_err("Ignoring " XML_RSC_ATTR_INCARNATION_NODEMAX
+                         " for %s because anonymous clones support only one "
+                         "instance per node", rsc->id);
         clone_data->clone_node_max = 1;
     }
 
     pe_rsc_trace(rsc, "Options for %s", rsc->id);
     pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
     pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
     pe_rsc_trace(rsc, "\tClone is unique: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_unique));
     pe_rsc_trace(rsc, "\tClone is promotable: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
 
     // Clones may contain a single group or primitive
     for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
          a_child = pcmk__xe_next(a_child)) {
 
         if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
             clone_data->xml_obj_child = a_child;
             break;
         }
     }
 
     if (clone_data->xml_obj_child == NULL) {
         pcmk__config_err("%s has nothing to clone", rsc->id);
         return FALSE;
     }
 
     /*
      * Make clones ever so slightly sticky by default
      *
      * This helps ensure clone instances are not shuffled around the cluster
      * for no benefit in situations when pre-allocation is not appropriate
      */
     if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
         add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
     }
 
     /* This ensures that the globally-unique value always exists for children to
      * inherit when being unpacked, as well as in resource agents' environment.
      */
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                    pe__rsc_bool_str(rsc, pe_rsc_unique));
 
     if (clone_data->clone_max <= 0) {
-        /* Create one child instance so that unpack_find_resource() will hook up
-         * any orphans up to the parent correctly.
+        /* Create one child instance so that unpack_find_resource() can hook
+         * any orphans up to the parent correctly.
          */
         if (pe__create_clone_child(rsc, data_set) == NULL) {
             return FALSE;
         }
 
     } else {
         // Create a child instance for each available instance number
         for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
             if (pe__create_clone_child(rsc, data_set) == NULL) {
                 return FALSE;
             }
         }
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
     return TRUE;
 }
 
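+/*!
+ * \internal
+ * \brief Check whether a clone is active
+ *
+ * \param[in] rsc  Clone to check
+ * \param[in] all  If TRUE, every instance must be active; otherwise any one
+ *
+ * \return TRUE if \p rsc is active according to \p all, otherwise FALSE
+ */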
 gboolean
 clone_active(pe_resource_t * rsc, gboolean all)
 {
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean child_active = child_rsc->fns->active(child_rsc, all);
 
         if (all == FALSE && child_active) {
             return TRUE;
         } else if (all && child_active == FALSE) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     } else {
         return FALSE;
     }
 }
 
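+/*!
+ * \internal
+ * \brief Print a bracketed node list with a label (legacy print formats)
+ *
+ * Nothing is printed if \p list is NULL.
+ */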
 static void
 short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
 {
     if(suffix == NULL) {
         suffix = "";
     }
 
     if (list) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         status_print("%s%s: [ %s ]%s", prefix, type, list, suffix);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
 
         } else if (options & pe_print_suppres_nl) {
             /* nothing */
         } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
             status_print("\n");
         }
 
     }
 }
 
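+/*!
+ * \internal
+ * \brief Get a resource's configured target role as a string
+ *
+ * If the resource has no target-role meta-attribute of its own, fall back
+ * to the one configured for its first child, so a clone can reflect a role
+ * configured on its instances.
+ */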
 static const char *
 configured_role_str(pe_resource_t * rsc)
 {
     const char *target_role = g_hash_table_lookup(rsc->meta,
                                                   XML_RSC_ATTR_TARGET_ROLE);
 
     if ((target_role == NULL) && rsc->children && rsc->children->data) {
         target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
                                           XML_RSC_ATTR_TARGET_ROLE);
     }
     return target_role;
 }
 
 static enum rsc_role_e
 configured_role(pe_resource_t * rsc)
 {
     const char *target_role = configured_role_str(rsc);
 
     if (target_role) {
         return text2role(target_role);
     }
     return RSC_ROLE_UNKNOWN;
 }
 
 static void
 clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = crm_strdup_printf("%s    ", pre_text);
     const char *target_role = configured_role_str(rsc);
     GList *gIter = rsc->children;
 
     status_print("%s<clone ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("multi_state=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</clone>\n", pre_text);
     free(child_text);
 }
 
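+/*!
+ * \internal
+ * \brief Check whether a flag is set on a resource and/or its descendants
+ *
+ * \param[in] rsc   Resource (tree) to check
+ * \param[in] flag  Flag to check for
+ * \param[in] any   If TRUE, check whether any of \p rsc and its descendants
+ *                  has the flag set; otherwise, whether all of them do
+ */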
 bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
 {
     GList *gIter;
     bool all = !any;
 
     if (pcmk_is_set(rsc->flags, flag)) {
         if(any) {
             return TRUE;
         }
     } else if(all) {
         return FALSE;
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         if(is_set_recursive(gIter->data, flag, any)) {
             if(any) {
                 return TRUE;
             }
 
         } else if(all) {
             return FALSE;
         }
     }
 
     if(all) {
         return TRUE;
     }
     return FALSE;
 }
 
 void
 clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *list_text = NULL;
     char *child_text = NULL;
     char *stopped_list = NULL;
     size_t list_text_len = 0;
     size_t stopped_list_len = 0;
 
     GList *promoted_list = NULL;
     GList *started_list = NULL;
     GList *gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         clone_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_clone_variant_data(clone_data, rsc);
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sClone Set: %s [%s]%s%s%s",
                  pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
                  pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(options, pe_print_clone_active)) {
                 pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_UNPROMOTED) {
                     promoted_list = g_list_append(promoted_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
-                /* uncolocated group - bleh */
+                /* Active on more than one node (such as an uncolocated
+                 * group), so print the instance in full
+                 */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     /* Promoted */
     promoted_list = g_list_sort(promoted_list, sort_node_uname);
     for (gIter = promoted_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
-	active_instances++;
+        active_instances++;
     }
 
     short_print(list_text, child_text, PROMOTED_INSTANCES, NULL, options,
                 print_data);
     g_list_free(promoted_list);
     free(list_text);
     list_text = NULL;
     list_text_len = 0;
 
     /* Started/Unpromoted */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_UNPROMOTED) {
             short_print(list_text, child_text,
                         UNPROMOTED_INSTANCES " (target-role)", NULL, options,
                         print_data);
         } else {
             short_print(list_text, child_text, UNPROMOTED_INSTANCES, NULL,
                         options, print_data);
         }
 
     } else {
         short_print(list_text, child_text, "Started", NULL, options, print_data);
     }
 
     g_list_free(started_list);
     free(list_text);
     list_text = NULL;
     list_text_len = 0;
 
     if (!pcmk_is_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GList *nIter;
             GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
             stopped_list_len = 0;
 
             if (list == NULL) {
-                /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
-                 * If we've not probed for them yet, the Stopped list will be empty
-                 */
+                /* Clusters with symmetric-cluster=false haven't calculated
+                 * allowed_nodes yet. If we haven't probed for them yet, the
+                 * stopped list will be empty.
+                 */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                     pcmk__add_word(&stopped_list, &stopped_list_len,
                                    node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         short_print(stopped_list, child_text, state, NULL, options, print_data);
         free(stopped_list);
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 
     free(child_text);
 }
 
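+/*!
+ * \internal
+ * \brief Output a clone and its instances in XML format
+ *
+ * Instances filtered out by \p only_node or \p only_rsc are skipped; the
+ * clone element itself is emitted only if at least one instance is shown.
+ */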
 PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int show_opts = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     GList *gIter = rsc->children;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "clone", 8,
                     "id", rsc->id,
                     "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
                     "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                     "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                     "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
                     "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
                     "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
                     "target_role", configured_role_str(rsc));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                      child_rsc, only_node, only_rsc);
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
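+/*!
+ * \internal
+ * \brief Output a clone and its instances in the default text format
+ *
+ * Fully active instances of anonymous clones are summarized as bracketed
+ * node lists; unique, failed, unmanaged, and orphaned instances are listed
+ * individually.
+ */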
 PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_default(pcmk__output_t *out, va_list args)
 {
     unsigned int show_opts = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     char *list_text = NULL;
     char *stopped_list = NULL;
     size_t list_text_len = 0;
     size_t stopped_list_len = 0;
 
     GList *promoted_list = NULL;
     GList *started_list = NULL;
     GList *gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(show_opts, pcmk_show_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                 pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_UNPROMOTED) {
                     promoted_list = g_list_append(promoted_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
-                /* uncolocated group - bleh */
+                /* Active on more than one node (such as an uncolocated
+                 * group), so print the instance in full
+                 */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             GList *all = NULL;
 
             clone_header(out, &rc, rsc, clone_data);
 
             /* Print every resource that's a child of this clone. */
             all = g_list_prepend(all, (gpointer) "*");
             out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                          child_rsc, only_node, all);
             g_list_free(all);
         }
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
+        g_list_free(promoted_list);
+        g_list_free(started_list);
         free(stopped_list);
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
         return pcmk_rc_ok;
     }
 
     /* Promoted */
     promoted_list = g_list_sort(promoted_list, sort_node_uname);
     for (gIter = promoted_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_casei)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
+    g_list_free(promoted_list);
+
     if (list_text != NULL) {
         clone_header(out, &rc, rsc, clone_data);
 
         out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]", list_text);
-        g_list_free(promoted_list);
         free(list_text);
         list_text = NULL;
         list_text_len = 0;
     }
 
     /* Started/Unpromoted */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_casei)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
+    g_list_free(started_list);
+
     if (list_text != NULL) {
         clone_header(out, &rc, rsc, clone_data);
 
         if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if (role == RSC_ROLE_UNPROMOTED) {
                 out->list_item(out, NULL,
                                UNPROMOTED_INSTANCES " (target-role): [ %s ]",
                                list_text);
             } else {
                 out->list_item(out, NULL, UNPROMOTED_INSTANCES ": [ %s ]",
                                list_text);
             }
 
         } else {
             out->list_item(out, NULL, "Started: [ %s ]", list_text);
         }
 
-        g_list_free(started_list);
         free(list_text);
         list_text = NULL;
         list_text_len = 0;
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GList *nIter;
             GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
             stopped_list_len = 0;
 
             if (list == NULL) {
-                /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
-                 * If we've not probed for them yet, the Stopped list will be empty
-                 */
+                /* Clusters with symmetric-cluster=false haven't calculated
+                 * allowed_nodes yet. If we haven't probed for them yet, the
+                 * stopped list will be empty.
+                 */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(only_node, node->details->uname, pcmk__str_casei)) {
                     pcmk__add_word(&stopped_list, &stopped_list_len,
                                    node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
             clone_header(out, &rc, rsc, clone_data);
 
             out->list_item(out, NULL, "%s: [ %s ]", state, stopped_list);
             free(stopped_list);
             stopped_list_len = 0;
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
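+/*!
+ * \internal
+ * \brief Free a clone, its variant data, and all of its instances
+ *
+ * \param[in] rsc  Clone to free
+ */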
 void
 clone_free(pe_resource_t * rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         free_xml(child_rsc->xml);
         child_rsc->xml = NULL;
         /* There could be a saved unexpanded xml */
         free_xml(child_rsc->orig_xml);
         child_rsc->orig_xml = NULL;
         child_rsc->fns->free(child_rsc);
     }
 
     g_list_free(rsc->children);
 
     if (clone_data) {
         CRM_ASSERT(clone_data->demote_notify == NULL);
         CRM_ASSERT(clone_data->stop_notify == NULL);
         CRM_ASSERT(clone_data->start_notify == NULL);
         CRM_ASSERT(clone_data->promote_notify == NULL);
     }
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 clone_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
 
         if (a_role > clone_role) {
             clone_role = a_role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
     return clone_role;
 }
 
 /*!
  * \internal
  * \brief Check whether a clone has an instance for every node
  *
  * \param[in] rsc       Clone to check
  * \param[in] data_set  Cluster state
  */
 bool
 pe__is_universal_clone(pe_resource_t *rsc,
                        pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
         if (clone_data->clone_max == g_list_length(data_set->nodes)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
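+/*!
+ * \internal
+ * \brief Check whether a clone should be filtered out of output
+ *
+ * \return TRUE if neither the clone, the resource it clones, nor any of its
+ *         instances matches \p only_rsc, otherwise FALSE
+ */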
 gboolean
 pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
     clone_variant_data_t *clone_data = NULL;
 
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) {
         passes = TRUE;
     } else {
         get_clone_variant_data(clone_data, rsc);
         passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child), pcmk__str_none);
 
         if (!passes) {
             for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
                 pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
                 if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                     passes = TRUE;
                     break;
                 }
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index ae94fc7268..31e73d7eb2 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,428 +1,423 @@
 /*
  * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/strings_internal.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define VARIANT_GROUP 1
 #include "./variant.h"
 
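+/*!
+ * \internal
+ * \brief Count a group's inactive members
+ *
+ * \param[in] rsc  Group to check
+ *
+ * \return Number of members of \p rsc that are not fully active
+ */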
 static int
 inactive_resources(pe_resource_t *rsc)
 {
     int retval = 0;
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (!child_rsc->fns->active(child_rsc, TRUE)) {
             retval++;
         }
     }
 
     return retval;
 }
 
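+/*!
+ * \internal
+ * \brief Print a group header, with any attributes in parentheses
+ *
+ * \param[in,out] out            Output object
+ * \param[in,out] rc             Where to store output return code
+ * \param[in]     rsc            Group being printed
+ * \param[in]     n_inactive     Number of inactive members to note in header
+ * \param[in]     show_inactive  Whether inactive members will be shown anyway
+ */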
+static void
+group_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, int n_inactive, bool show_inactive)
+{
+    char *attrs = NULL;
+    size_t len = 0;
+
+    if (n_inactive > 0 && !show_inactive) {
+        char *word = crm_strdup_printf("%d member%s inactive", n_inactive,
+                                       pcmk__plural_s(n_inactive));
+        pcmk__add_separated_word(&attrs, &len, word, ", ");
+        free(word);
+    }
+
+    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+        pcmk__add_separated_word(&attrs, &len, "unmanaged", ", ");
+    }
+
+    if (pe__resource_is_disabled(rsc)) {
+        pcmk__add_separated_word(&attrs, &len, "disabled", ", ");
+    }
+
+    if (attrs) {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)",
+                                 rsc->id, attrs);
+        free(attrs);
+    } else {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s", rsc->id);
+    }
+}
+
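+/*!
+ * \internal
+ * \brief Check whether a group member should be skipped in output
+ *
+ * \return true if \p child should not be shown, otherwise false
+ */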
 static bool
 skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
                GList *only_rsc, unsigned int show_opts)
 {
     bool star_list = pcmk__list_of_1(only_rsc) &&
                      pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
     bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE);
     bool child_active = child->fns->active(child, FALSE);
     bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs);
 
     /* If the resource is in only_rsc by name (so, ignoring "*") then allow
      * it regardless of if it's active or not.
      */
     if (!star_list && !child_filtered) {
         return false;
 
     } else if (!child_filtered && (child_active || show_inactive)) {
         return false;
 
     } else if (parent_passes && (child_active || show_inactive)) {
         return false;
 
     }
 
     return true;
 }
 
 gboolean
 group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = rsc->xml;
     xmlNode *xml_native_rsc = NULL;
     group_variant_data_t *group_data = NULL;
     const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
     const char *clone_id = NULL;
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     group_data = calloc(1, sizeof(group_variant_data_t));
     group_data->num_children = 0;
     group_data->first_child = NULL;
     group_data->last_child = NULL;
     rsc->variant_opaque = group_data;
 
-    // We don't actually need the null checks but it speeds up the common case
+    // We don't actually need the null checks, but they speed up the common case
     if ((group_ordered == NULL)
         || (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
         group_data->ordered = TRUE;
     }
     if ((group_colocated == NULL)
         || (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
         group_data->colocated = TRUE;
     }
 
     clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
 
     for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
          xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_native_rsc->name,
                          XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
             pe_resource_t *new_rsc = NULL;
 
             crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
             if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
                 pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
                 if (new_rsc != NULL && new_rsc->fns != NULL) {
                     new_rsc->fns->free(new_rsc);
                 }
                 continue;
             }
 
             group_data->num_children++;
             rsc->children = g_list_append(rsc->children, new_rsc);
 
             if (group_data->first_child == NULL) {
                 group_data->first_child = new_rsc;
             }
             group_data->last_child = new_rsc;
             pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
         }
     }
 
     if (group_data->num_children == 0) {
         pcmk__config_warn("Group %s does not have any children", rsc->id);
-        return TRUE; // Allow empty groups, children can be added later
+        return TRUE; // Allow empty groups; children can be added later
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
 
     return TRUE;
 }
 
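+/*!
+ * \internal
+ * \brief Check whether a group is active
+ *
+ * \param[in] rsc  Group to check
+ * \param[in] all  If TRUE, every member must be active; otherwise any one
+ *
+ * \return TRUE if \p rsc is active according to \p all, otherwise FALSE
+ */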
 gboolean
 group_active(pe_resource_t * rsc, gboolean all)
 {
     gboolean c_all = TRUE;
     gboolean c_any = FALSE;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (child_rsc->fns->active(child_rsc, all)) {
             c_any = TRUE;
         } else {
             c_all = FALSE;
         }
     }
 
     if (c_any == FALSE) {
         return FALSE;
     } else if (all && c_all == FALSE) {
         return FALSE;
     }
     return TRUE;
 }
 
 static void
 group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     GList *gIter = rsc->children;
     char *child_text = crm_strdup_printf("%s     ", pre_text);
 
     status_print("%s<group id=\"%s\" ", pre_text, rsc->id);
     status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</group>\n", pre_text);
     free(child_text);
 }
 
 void
 group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = NULL;
     GList *gIter = rsc->children;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         group_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     if (options & pe_print_brief) {
         print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
 
     } else {
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int show_opts = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     GList *gIter = rsc->children;
     char *count = pcmk__itoa(g_list_length(gIter));
 
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         free(count);
         return rc;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             rc = pe__name_and_nvpairs_xml(out, true, "group", 4
                                           , "id", rsc->id
                                           , "number_resources", count
                                           , "managed", pe__rsc_bool_str(rsc, pe_rsc_managed)
                                           , "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)));
-            free(count);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
-        out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc,
-					 only_node, only_rsc);
+        out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
+                     child_rsc, only_node, only_rsc);
     }
 
+    free(count);
+
     if (rc == pcmk_rc_ok) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_default(pcmk__output_t *out, va_list args)
 {
     unsigned int show_opts = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
 
     gboolean active = rsc->fns->active(rsc, TRUE);
     gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_brief)) {
         GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
 
         if (rscs != NULL) {
-            char *s = NULL;
-
-            if (!active && partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
-                int n_active = inactive_resources(rsc);
-
-                if (n_active > 0) {
-                    s = crm_strdup_printf(" (%d member%s inactive)", n_active, pcmk__plural_s(n_active));
-                }
-            }
-
-            out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s%s", rsc->id,
-                            s ? s : "",
-                            pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
-                            pe__resource_is_disabled(rsc) ? " (disabled)" : "");
-
+            group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
             pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs);
 
             rc = pcmk_rc_ok;
             g_list_free(rscs);
-
-            if (s) {
-                free(s);
-            }
         }
 
     } else {
         for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-            char *s = NULL;
 
             if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
                 continue;
             }
 
-            if (!active && partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
-                int n_active = inactive_resources(rsc);
-
-                if (n_active > 0) {
-                    s = crm_strdup_printf(" (%d member%s inactive)", n_active, pcmk__plural_s(n_active));
-                }
-            }
-
-
-            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s%s", rsc->id,
-                                     s ? s : "",
-                                     pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
-                                     pe__resource_is_disabled(rsc) ? " (disabled)" : "");
-
+            group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
             out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                          child_rsc, only_node, only_rsc);
-
-            if (s) {
-                free(s);
-            }
         }
     }
 
-	PCMK__OUTPUT_LIST_FOOTER(out, rc);
+    PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     return rc;
 }
 
 void
 group_free(pe_resource_t * rsc)
 {
     CRM_CHECK(rsc != NULL, return);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         child_rsc->fns->free(child_rsc);
     }
 
     pe_rsc_trace(rsc, "Freeing child list");
     g_list_free(rsc->children);
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 group_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
 
         if (role > group_role) {
             group_role = role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
     return group_role;
 }
 
 gboolean
 pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
 
     if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)), pcmk__str_none)) {
         passes = TRUE;
     } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) {
         passes = TRUE;
     } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) {
         passes = TRUE;
     } else {
         for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }