diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index 463468e930..24cb79b804 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,315 +1,315 @@
-<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.7" epoch="1" num_updates="173" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.9" epoch="1" num_updates="173" admin_epoch="1" cib-last-written="Tue May  5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="cluster01">
         <instance_attributes id="nodes-1">
           <nvpair id="nodes-1-location" name="location" value="office"/>
         </instance_attributes>
       </node>
       <node id="2" uname="cluster02"/>
     </nodes>
     <resources>
       <clone id="ping-clone">
         <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
           <instance_attributes id="ping-instance_attributes">
             <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
             <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
             <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
           </instance_attributes>
           <operations>
             <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
             <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
             <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
           </operations>
         </primitive>
       </clone>
       <primitive class="stonith" id="Fencing" type="fence_xvm">
         <instance_attributes id="Fencing-instance_attributes">
           <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
         </instance_attributes>
         <operations>
           <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
         </operations>
       </primitive>
       <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
         </instance_attributes>
         <operations>
           <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
           <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
           <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
           <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
           <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
           <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
         </operations>
       </primitive>
       <clone id="inactive-clone">
         <meta_attributes id="inactive-clone-meta_attributes">
           <nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
       </clone>
       <group id="inactive-group">
         <meta_attributes id="inactive-group-meta_attributes">
           <nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
         <primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
       </group>
       <bundle id="httpd-bundle">
         <docker image="pcmk:http" replicas="3"/>
         <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
           <port-mapping id="httpd-port" port="80"/>
         </network>
         <storage>
           <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
           <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
           <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
         </storage>
         <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
         <meta_attributes id="bundle-meta_attributes">
           <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
         </meta_attributes>
       </bundle>
       <group id="exim-group">
         <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
           <instance_attributes id="params-public-ip">
             <nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
           </instance_attributes>
         </primitive>
         <primitive id="Email" class="lsb" type="exim"/>
       </group>
       <clone id="mysql-clone-group">
         <group id="mysql-group">
           <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
             <operations>
               <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
             </operations>
           </primitive>
         </group>
       </clone>
       <clone id="promotable-clone">
         <meta_attributes id="promotable-clone-meta_attributes">
           <nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
         </meta_attributes>
-        <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
+        <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful" description="test_description">
           <operations id="promotable-rsc-operations">
             <op id="promotable-rsc-monitor-promoted-5" name="monitor" interval="5" role="Promoted"/>
             <op id="promotable-rsc-monitor-unpromoted-10" name="monitor" interval="10" role="Unpromoted"/>
           </operations>
         </primitive>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
       <rsc_location id="loc-promotable-clone" rsc="promotable-clone">
         <rule id="loc-promotable-clone-rule" role="Promoted" score="10">
           <expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
         </rule>
       </rsc_location>
     </constraints>
     <tags>
       <tag id="all-nodes">
         <obj_ref id="1"/>
         <obj_ref id="2"/>
       </tag>
       <tag id="even-nodes">
         <obj_ref id="2"/>
       </tag>
       <tag id="odd-nodes">
         <obj_ref id="1"/>
       </tag>
       <tag id="inactive-rscs">
         <obj_ref id="inactive-group"/>
         <obj_ref id="inactive-clone"/>
       </tag>
       <tag id="fencing-rscs">
         <obj_ref id="Fencing"/>
       </tag>
     </tags>
     <op_defaults>
       <meta_attributes id="op_defaults-options">
         <nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
       </meta_attributes>
     </op_defaults>
   </configuration>
   <status>
     <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="2">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1588951263" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951265" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1588951263" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1588951278" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
             <lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951284" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1591717057" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1591717057" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
             <lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
             <lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" last-rc-change="1613059546" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="547dff7d7a9d7448dd07cde35966f08a"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
             <lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="1ed1cced876b80101858caac9836e113"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
             <lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
         <instance_attributes id="status-2">
           <nvpair id="status-2-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="1">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951274" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
             <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1613056690" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1591717057" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1591717057" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" last-rc-change="1613058809" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="dfb531456299aa7b527d4e57805703da"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
             <lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="377a66c466df6e6edf98a6e83cff9c22"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
             <lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
         <instance_attributes id="status-1">
           <nvpair id="status-1-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="httpd-bundle-0" uname="httpd-bundle-0">
       <lrm id="httpd-bundle-0">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="httpd-bundle-1" uname="httpd-bundle-1">
       <lrm id="httpd-bundle-1">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.7.1" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" last-rc-change="1613491700" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index a2cba60614..fa516aada3 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,5039 +1,5039 @@
 =#=#=#= Begin test: Basic text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output
 =#=#=#= Begin test: XML output =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output
 =#=#=#= Begin test: Basic text output without node section =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output without node section
 =#=#=#= Begin test: XML output without the node section =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --exclude=nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output without the node section
 =#=#=#= Begin test: Text output with only the node section =#=#=#=
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output with only the node section
 =#=#=#= Begin test: Complete text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output
 =#=#=#= Begin test: Complete text output with detail =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
     * ping	(ocf:pacemaker:ping):	 Started cluster01
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[2]
       * httpd-bundle-ip-192.168.122.133	(ocf:heartbeat:IPaddr2):	 Stopped
       * httpd	(ocf:heartbeat:apache):	 Stopped
       * httpd-bundle-docker-2	(ocf:heartbeat:docker):	 Stopped
       * httpd-bundle-2	(ocf:pacemaker:remote):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
-    * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02
-    * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01
-    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
-    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
-    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped
+    * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (test_description)
+    * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (test_description)
+    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
+    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
+    * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
 =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output with detail
 =#=#=#= Begin test: Complete brief text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * 1	(ocf:pacemaker:Dummy):	Active cluster02
   * 1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * 1/1	(lsb:exim):	Active cluster02
     * 1/1	(ocf:heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output
 =#=#=#= Begin test: Complete text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
-      * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted
+      * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted (test_description)
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started
   * Node cluster02: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * dummy	(ocf:pacemaker:Dummy):	 Started
       * Public-IP	(ocf:heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
-      * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted
+      * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted (test_description)
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started
   * GuestNode httpd-bundle-0: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-1: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-2: OFFLINE:
     * Resources:
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output grouped by node
 =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
       * 1	(lsb:exim):	Active 
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
   * GuestNode httpd-bundle-1: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node
 =#=#=#= Begin test: XML output grouped by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --group-by-node">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
-      <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" description="test_description">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </node>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
-      <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+      <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" description="test_description">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output grouped by node
 =#=#=#= Begin test: Complete text output filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by node
 =#=#=#= Begin test: XML output filtered by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as xml --include=all --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node
 =#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by tag
 =#=#=#= Begin test: XML output filtered by tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --node=even-nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </clone>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by tag
 =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by resource tag
 =#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource tag
 =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by node that doesn't exist
 =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes/>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
   </resources>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by node
 =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by primitive resource
 =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Fencing">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by primitive resource
 =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource
 =#=#=#= Begin test: XML output filtered by group resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=exim-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource
 =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource member
 =#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="1" maintenance="false" managed="true" disabled="false">
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource member
 =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource
 =#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping-clone">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource
 =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource instance
 =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource instance
 =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by exact clone resource instance
 =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by exact clone resource instance
 =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by resource that doesn't exist
 =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources/>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by tag
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle resource
 =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by inactive bundle resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled IP address resource
 =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled IP address resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[1]
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled container
 =#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="2">
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled container
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle connection
 =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundle connection
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
     * Replica[1]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
     * Replica[2]
       * httpd	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled primitive resource
 =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled primitive resource
 =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by clone name in cloned group
 =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by clone name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by group name in cloned group
 =#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by group name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by primitive name in cloned group
 =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by primitive name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: Text output of partially active resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (not installed) 
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
 =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources
 =#=#=#= Begin test: XML output of partially active resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="16" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <group id="partially-active-group" number_resources="4" maintenance="false" managed="true" disabled="false">
       <resource id="dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-3" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="dummy-4" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <resource id="smart-mon" resource_agent="ocf:pacemaker:HealthSMART" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-2" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="probe" rc="3" rc_text="unimplemented feature" exec-time="33ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-4" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="probe" rc="5" rc_text="not installed" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="smart-mon" orphan="false" migration-threshold="1000000">
         <operation_history call="9" task="probe" rc="5" rc_text="not installed" exec-time="33ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="6" task="probe" rc="5" rc_text="not installed" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="probe" rc="2" rc_text="invalid parameter" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <failures>
     <failure op_key="dummy-2_monitor_0" node="cluster02" exitstatus="unimplemented feature" exitreason="" exitcode="3" call="2" status="complete" queued="0" exec="33" interval="0" task="monitor"/>
   </failures>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of partially active resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (not installed) 
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
     * dummy-3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * dummy-4	(ocf:pacemaker:Dummy):	 Stopped (not installed) 
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (not installed) 
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
 =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources
 =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Full List of Resources:
   * 0/1	(ocf:pacemaker:HealthSMART):	Active
   * 1/1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (not installed) 
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group:
     * 2/4	(ocf:pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
     * dummy-2: migration-threshold=1000000:
       * (2) probe
     * dummy-4: migration-threshold=1000000:
       * (2) probe
     * smart-mon: migration-threshold=1000000:
       * (9) probe
     * ping: migration-threshold=1000000:
       * (6) probe
   * Node: cluster01 (1):
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) probe
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
 =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output, with inactive resources
 =#=#=#= Begin test: Text output of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 =#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active group
 =#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
     * dummy-3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * dummy-4	(ocf:pacemaker:Dummy):	 Stopped (not installed) 
 =#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active group, with inactive resources
 =#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
 =#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of active member of partially active group
 =#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
 =#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of inactive member of partially active group
 =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02 (2): online, feature set <3.15.1:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 2	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0@cluster02: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
   * GuestNode httpd-bundle-1@cluster01: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Inactive Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (not installed) 
   * Resource Group: partially-active-group:
     * 2/4	(ocf:pacemaker:Dummy):	Active cluster02
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (not installed) 
 
 Node Attributes:
   * Node: cluster01 (1):
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
     * dummy-2: migration-threshold=1000000:
       * (2) probe
     * dummy-4: migration-threshold=1000000:
       * (2) probe
     * smart-mon: migration-threshold=1000000:
       * (9) probe
     * ping: migration-threshold=1000000:
       * (6) probe
   * Node: cluster01 (1):
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) probe
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
 =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node, with inactive resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 FAILED cluster01
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (not installed) 
 =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="16" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <resource id="smart-mon" resource_agent="ocf:pacemaker:HealthSMART" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon        - XML output of partially active resources, filtered by node
 =#=#=#= Begin test: Text output of active unmanaged resource on offline node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Online: [ cluster01 ]
   * OFFLINE: [ cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (maintenance)
   * rsc1	(ocf:pacemaker:Dummy):	 Started cluster01 (maintenance)
   * rsc2	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
 =#=#=#= End test: Text output of active unmanaged resource on offline node - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of active unmanaged resource on offline node
 =#=#=#= Begin test: XML output of active unmanaged resource on offline node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="2"/>
     <resources_configured number="3" disabled="0" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="true" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
     <node name="cluster02" id="2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="true" resources_running="1" type="member"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="rsc1" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="rsc2" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="false"/>
     </resource>
   </resources>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="cancel" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="rsc1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of active unmanaged resource on offline node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of active unmanaged resource on offline node
 =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Online: [ cluster01 ]
   * OFFLINE: [ cluster02 ]
 
 Active Resources:
   * 1	(ocf:pacemaker:Dummy):	Active cluster01
   * 1	(ocf:pacemaker:Dummy):	Active cluster02
   * 1	(stonith:fence_xvm):	Active cluster01
 =#=#=#= End test: Brief text output of active unmanaged resource on offline node - OK (0) =#=#=#=
 * Passed: crm_mon        - Brief text output of active unmanaged resource on offline node
 =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: OFFLINE:
     * Resources:
       * 1	(ocf:pacemaker:Dummy):	Active 
 =#=#=#= End test: Brief text output of active unmanaged resource on offline node, grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Brief text output of active unmanaged resource on offline node, grouped by node
 =#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * GuestNode httpd-bundle-0: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (maintenance):
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * ping	(ocf:pacemaker:ping):	 Started cluster01 (maintenance)
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (maintenance)
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled, maintenance):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
   * Container bundle set: httpd-bundle [pcmk:http] (maintenance):
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01 (maintenance)
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped (maintenance)
   * Resource Group: exim-group (maintenance):
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group] (maintenance):
     * Resource Group: mysql-group:0 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Resource Group: mysql-group:1 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (maintenance)
   * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (maintenance)
 =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance-mode enabled
 =#=#=#= Begin test: XML output of all resources with maintenance-mode enabled =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="true" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="true" managed="false" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="true" managed="false" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="true" managed="false" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of all resources with maintenance-mode enabled
 =#=#=#= Begin test: Text output of all resources with maintenance enabled for a node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster02: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 ]
   * GuestOnline: [ httpd-bundle-0 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Started: [ cluster01 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Text output of all resources with maintenance enabled for a node - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance enabled for a node
 =#=#=#= Begin test: XML output of all resources with maintenance enabled for a node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="maintenance" value="true"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of all resources with maintenance enabled for a node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of all resources with maintenance enabled for a node
 =#=#=#= Begin test: Text output of all resources with maintenance meta attribute true =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * GuestNode httpd-bundle-0: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (maintenance):
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * ping	(ocf:pacemaker:ping):	 Started cluster01 (maintenance)
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled, maintenance):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
   * Container bundle set: httpd-bundle [pcmk:http] (maintenance):
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01 (maintenance)
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped (maintenance)
   * Resource Group: exim-group (maintenance):
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group] (maintenance):
     * Resource Group: mysql-group:0 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Resource Group: mysql-group:1 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (maintenance)
   * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (maintenance)
 =#=#=#= End test: Text output of all resources with maintenance meta attribute true - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance meta attribute true
 =#=#=#= Begin test: XML output of all resources with maintenance meta attribute true =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="true" managed="false" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="true" managed="false" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="true" managed="false" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of all resources with maintenance meta attribute true - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of all resources with maintenance meta attribute true
 =#=#=#= Begin test: Text output of guest node's container on different node from its remote resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cent7-host2 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 10 resource instances configured
 
 Node List:
   * Online: [ cent7-host1 cent7-host2 ]
   * GuestOnline: [ httpd-bundle1-0 httpd-bundle2-0 ]
 
 Active Resources:
   * Resource Group: group1:
     * dummy1	(ocf:pacemaker:Dummy):	 Started cent7-host1
   * Resource Group: group2:
     * dummy2	(ocf:pacemaker:Dummy):	 Started cent7-host2
   * Container bundle: httpd-bundle1 [pcmktest:http]:
     * httpd-bundle1-0 (192.168.20.188)	(ocf:heartbeat:apache):	 Started cent7-host1
   * Container bundle: httpd-bundle2 [pcmktest:http]:
     * httpd-bundle2-0 (192.168.20.190)	(ocf:heartbeat:apache):	 Started cent7-host2
 =#=#=#= End test: Text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of guest node's container on different node from its remote resource
 =#=#=#= Begin test: Complete text output of guest node's container on different node from its remote resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cent7-host2 (3232262829) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 10 resource instances configured
 
 Node List:
   * Node cent7-host1 (3232262828): online, feature set <3.15.1
   * Node cent7-host2 (3232262829): online, feature set <3.15.1
   * GuestNode httpd-bundle1-0@cent7-host1: online
   * GuestNode httpd-bundle2-0@cent7-host2: online
 
 Active Resources:
   * Resource Group: group1:
     * dummy1	(ocf:pacemaker:Dummy):	 Started cent7-host1
   * Resource Group: group2:
     * dummy2	(ocf:pacemaker:Dummy):	 Started cent7-host2
   * Container bundle: httpd-bundle1 [pcmktest:http]:
       * httpd-bundle1-ip-192.168.20.188	(ocf:heartbeat:IPaddr2):	 Started cent7-host1
       * httpd1	(ocf:heartbeat:apache):	 Started httpd-bundle1-0
       * httpd-bundle1-docker-0	(ocf:heartbeat:docker):	 Started cent7-host1
       * httpd-bundle1-0	(ocf:pacemaker:remote):	 Started cent7-host2
   * Container bundle: httpd-bundle2 [pcmktest:http]:
       * httpd-bundle2-ip-192.168.20.190	(ocf:heartbeat:IPaddr2):	 Started cent7-host2
       * httpd2	(ocf:heartbeat:apache):	 Started httpd-bundle2-0
       * httpd-bundle2-docker-0	(ocf:heartbeat:docker):	 Started cent7-host2
       * httpd-bundle2-0	(ocf:pacemaker:remote):	 Started cent7-host2
 =#=#=#= End test: Complete text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output of guest node's container on different node from its remote resource
diff --git a/cts/scheduler/summary/bundle-order-partial-stop.summary b/cts/scheduler/summary/bundle-order-partial-stop.summary
index 8e4c007b12..5fc2efe04c 100644
--- a/cts/scheduler/summary/bundle-order-partial-stop.summary
+++ b/cts/scheduler/summary/bundle-order-partial-stop.summary
@@ -1,127 +1,127 @@
 Current cluster status:
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
       * rabbitmq-bundle-0	(ocf:heartbeat:rabbitmq-cluster):	 Started undercloud
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
       * galera-bundle-0	(ocf:heartbeat:galera):	 Promoted undercloud
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
       * redis-bundle-0	(ocf:heartbeat:redis):	 Promoted undercloud
     * ip-192.168.122.254	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.250	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.249	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.253	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.247	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.248	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
       * haproxy-bundle-docker-0	(ocf:heartbeat:docker):	 Started undercloud
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0	(ocf:heartbeat:docker):	 Started undercloud
 
 Transition Summary:
   * Stop       rabbitmq-bundle-docker-0             (               undercloud )  due to node availability
   * Stop       rabbitmq-bundle-0                    (               undercloud )  due to node availability
-  * Stop       rabbitmq:0                           (      rabbitmq-bundle-0 )  due to colocation with haproxy-bundle-docker-0
+  * Stop       rabbitmq:0                           (        rabbitmq-bundle-0 )  due to colocation with haproxy-bundle-docker-0
   * Stop       galera-bundle-docker-0               (               undercloud )  due to node availability
   * Stop       galera-bundle-0                      (               undercloud )  due to node availability
   * Stop       galera:0                             ( Promoted galera-bundle-0 )  due to unrunnable galera-bundle-0 start
   * Stop       redis-bundle-docker-0                (               undercloud )  due to node availability
   * Stop       redis-bundle-0                       (               undercloud )  due to node availability
   * Stop       redis:0                              (  Promoted redis-bundle-0 )  due to unrunnable redis-bundle-0 start
   * Stop       ip-192.168.122.254                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.250                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.249                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.253                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.247                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.248                   (               undercloud )  due to node availability
   * Stop       haproxy-bundle-docker-0              (               undercloud )  due to node availability
   * Stop       openstack-cinder-volume-docker-0     (               undercloud )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   rabbitmq-bundle-clone_pre_notify_stop_0
   * Resource action: galera          cancel=10000 on galera-bundle-0
   * Resource action: redis           cancel=20000 on redis-bundle-0
   * Pseudo action:   redis-bundle-master_pre_notify_demote_0
   * Pseudo action:   openstack-cinder-volume_stop_0
   * Pseudo action:   haproxy-bundle_stop_0
   * Pseudo action:   redis-bundle_demote_0
   * Pseudo action:   galera-bundle_demote_0
   * Pseudo action:   rabbitmq-bundle_stop_0
   * Resource action: rabbitmq        notify on rabbitmq-bundle-0
   * Pseudo action:   rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
   * Pseudo action:   rabbitmq-bundle-clone_stop_0
   * Pseudo action:   galera-bundle-master_demote_0
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-pre_notify_demote_0
   * Pseudo action:   redis-bundle-master_demote_0
   * Resource action: haproxy-bundle-docker-0 stop on undercloud
   * Resource action: openstack-cinder-volume-docker-0 stop on undercloud
   * Pseudo action:   openstack-cinder-volume_stopped_0
   * Pseudo action:   haproxy-bundle_stopped_0
   * Resource action: rabbitmq        stop on rabbitmq-bundle-0
   * Pseudo action:   rabbitmq-bundle-clone_stopped_0
   * Resource action: rabbitmq-bundle-0 stop on undercloud
   * Resource action: galera          demote on galera-bundle-0
   * Pseudo action:   galera-bundle-master_demoted_0
   * Resource action: redis           demote on redis-bundle-0
   * Pseudo action:   redis-bundle-master_demoted_0
   * Resource action: ip-192.168.122.254 stop on undercloud
   * Resource action: ip-192.168.122.250 stop on undercloud
   * Resource action: ip-192.168.122.249 stop on undercloud
   * Resource action: ip-192.168.122.253 stop on undercloud
   * Resource action: ip-192.168.122.247 stop on undercloud
   * Resource action: ip-192.168.122.248 stop on undercloud
   * Pseudo action:   galera-bundle_demoted_0
   * Pseudo action:   galera-bundle_stop_0
   * Pseudo action:   rabbitmq-bundle-clone_post_notify_stopped_0
   * Resource action: rabbitmq-bundle-docker-0 stop on undercloud
   * Pseudo action:   galera-bundle-master_stop_0
   * Pseudo action:   redis-bundle-master_post_notify_demoted_0
   * Pseudo action:   rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
   * Resource action: galera          stop on galera-bundle-0
   * Pseudo action:   galera-bundle-master_stopped_0
   * Resource action: galera-bundle-0 stop on undercloud
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-post_notify_demoted_0
   * Pseudo action:   redis-bundle-master_pre_notify_stop_0
   * Pseudo action:   redis-bundle_demoted_0
   * Pseudo action:   rabbitmq-bundle_stopped_0
   * Resource action: galera-bundle-docker-0 stop on undercloud
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-pre_notify_stop_0
   * Pseudo action:   galera-bundle_stopped_0
   * Pseudo action:   redis-bundle_stop_0
   * Pseudo action:   redis-bundle-master_stop_0
   * Resource action: redis           stop on redis-bundle-0
   * Pseudo action:   redis-bundle-master_stopped_0
   * Resource action: redis-bundle-0  stop on undercloud
   * Pseudo action:   redis-bundle-master_post_notify_stopped_0
   * Resource action: redis-bundle-docker-0 stop on undercloud
   * Cluster action:  do_shutdown on undercloud
   * Pseudo action:   redis-bundle-master_confirmed-post_notify_stopped_0
   * Pseudo action:   redis-bundle_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ undercloud ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
       * rabbitmq-bundle-0	(ocf:heartbeat:rabbitmq-cluster):	 Stopped
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
       * galera-bundle-0	(ocf:heartbeat:galera):	 Stopped
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
       * redis-bundle-0	(ocf:heartbeat:redis):	 Stopped
     * ip-192.168.122.254	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.250	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.249	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.253	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.247	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.248	(ocf:heartbeat:IPaddr2):	 Stopped
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
       * haproxy-bundle-docker-0	(ocf:heartbeat:docker):	 Stopped
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0	(ocf:heartbeat:docker):	 Stopped
diff --git a/cts/scheduler/summary/bundle-order-stop.summary b/cts/scheduler/summary/bundle-order-stop.summary
index 8e4c007b12..5fc2efe04c 100644
--- a/cts/scheduler/summary/bundle-order-stop.summary
+++ b/cts/scheduler/summary/bundle-order-stop.summary
@@ -1,127 +1,127 @@
 Current cluster status:
   * Node List:
     * Online: [ undercloud ]
     * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
       * rabbitmq-bundle-0	(ocf:heartbeat:rabbitmq-cluster):	 Started undercloud
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
       * galera-bundle-0	(ocf:heartbeat:galera):	 Promoted undercloud
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
       * redis-bundle-0	(ocf:heartbeat:redis):	 Promoted undercloud
     * ip-192.168.122.254	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.250	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.249	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.253	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.247	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * ip-192.168.122.248	(ocf:heartbeat:IPaddr2):	 Started undercloud
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
       * haproxy-bundle-docker-0	(ocf:heartbeat:docker):	 Started undercloud
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0	(ocf:heartbeat:docker):	 Started undercloud
 
 Transition Summary:
   * Stop       rabbitmq-bundle-docker-0             (               undercloud )  due to node availability
   * Stop       rabbitmq-bundle-0                    (               undercloud )  due to node availability
-  * Stop       rabbitmq:0                           (      rabbitmq-bundle-0 )  due to colocation with haproxy-bundle-docker-0
+  * Stop       rabbitmq:0                           (        rabbitmq-bundle-0 )  due to colocation with haproxy-bundle-docker-0
   * Stop       galera-bundle-docker-0               (               undercloud )  due to node availability
   * Stop       galera-bundle-0                      (               undercloud )  due to node availability
   * Stop       galera:0                             ( Promoted galera-bundle-0 )  due to unrunnable galera-bundle-0 start
   * Stop       redis-bundle-docker-0                (               undercloud )  due to node availability
   * Stop       redis-bundle-0                       (               undercloud )  due to node availability
   * Stop       redis:0                              (  Promoted redis-bundle-0 )  due to unrunnable redis-bundle-0 start
   * Stop       ip-192.168.122.254                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.250                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.249                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.253                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.247                   (               undercloud )  due to node availability
   * Stop       ip-192.168.122.248                   (               undercloud )  due to node availability
   * Stop       haproxy-bundle-docker-0              (               undercloud )  due to node availability
   * Stop       openstack-cinder-volume-docker-0     (               undercloud )  due to node availability
 
 Executing Cluster Transition:
   * Pseudo action:   rabbitmq-bundle-clone_pre_notify_stop_0
   * Resource action: galera          cancel=10000 on galera-bundle-0
   * Resource action: redis           cancel=20000 on redis-bundle-0
   * Pseudo action:   redis-bundle-master_pre_notify_demote_0
   * Pseudo action:   openstack-cinder-volume_stop_0
   * Pseudo action:   haproxy-bundle_stop_0
   * Pseudo action:   redis-bundle_demote_0
   * Pseudo action:   galera-bundle_demote_0
   * Pseudo action:   rabbitmq-bundle_stop_0
   * Resource action: rabbitmq        notify on rabbitmq-bundle-0
   * Pseudo action:   rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
   * Pseudo action:   rabbitmq-bundle-clone_stop_0
   * Pseudo action:   galera-bundle-master_demote_0
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-pre_notify_demote_0
   * Pseudo action:   redis-bundle-master_demote_0
   * Resource action: haproxy-bundle-docker-0 stop on undercloud
   * Resource action: openstack-cinder-volume-docker-0 stop on undercloud
   * Pseudo action:   openstack-cinder-volume_stopped_0
   * Pseudo action:   haproxy-bundle_stopped_0
   * Resource action: rabbitmq        stop on rabbitmq-bundle-0
   * Pseudo action:   rabbitmq-bundle-clone_stopped_0
   * Resource action: rabbitmq-bundle-0 stop on undercloud
   * Resource action: galera          demote on galera-bundle-0
   * Pseudo action:   galera-bundle-master_demoted_0
   * Resource action: redis           demote on redis-bundle-0
   * Pseudo action:   redis-bundle-master_demoted_0
   * Resource action: ip-192.168.122.254 stop on undercloud
   * Resource action: ip-192.168.122.250 stop on undercloud
   * Resource action: ip-192.168.122.249 stop on undercloud
   * Resource action: ip-192.168.122.253 stop on undercloud
   * Resource action: ip-192.168.122.247 stop on undercloud
   * Resource action: ip-192.168.122.248 stop on undercloud
   * Pseudo action:   galera-bundle_demoted_0
   * Pseudo action:   galera-bundle_stop_0
   * Pseudo action:   rabbitmq-bundle-clone_post_notify_stopped_0
   * Resource action: rabbitmq-bundle-docker-0 stop on undercloud
   * Pseudo action:   galera-bundle-master_stop_0
   * Pseudo action:   redis-bundle-master_post_notify_demoted_0
   * Pseudo action:   rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
   * Resource action: galera          stop on galera-bundle-0
   * Pseudo action:   galera-bundle-master_stopped_0
   * Resource action: galera-bundle-0 stop on undercloud
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-post_notify_demoted_0
   * Pseudo action:   redis-bundle-master_pre_notify_stop_0
   * Pseudo action:   redis-bundle_demoted_0
   * Pseudo action:   rabbitmq-bundle_stopped_0
   * Resource action: galera-bundle-docker-0 stop on undercloud
   * Resource action: redis           notify on redis-bundle-0
   * Pseudo action:   redis-bundle-master_confirmed-pre_notify_stop_0
   * Pseudo action:   galera-bundle_stopped_0
   * Pseudo action:   redis-bundle_stop_0
   * Pseudo action:   redis-bundle-master_stop_0
   * Resource action: redis           stop on redis-bundle-0
   * Pseudo action:   redis-bundle-master_stopped_0
   * Resource action: redis-bundle-0  stop on undercloud
   * Pseudo action:   redis-bundle-master_post_notify_stopped_0
   * Resource action: redis-bundle-docker-0 stop on undercloud
   * Cluster action:  do_shutdown on undercloud
   * Pseudo action:   redis-bundle-master_confirmed-post_notify_stopped_0
   * Pseudo action:   redis-bundle_stopped_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ undercloud ]
 
   * Full List of Resources:
     * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
       * rabbitmq-bundle-0	(ocf:heartbeat:rabbitmq-cluster):	 Stopped
     * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
       * galera-bundle-0	(ocf:heartbeat:galera):	 Stopped
     * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
       * redis-bundle-0	(ocf:heartbeat:redis):	 Stopped
     * ip-192.168.122.254	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.250	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.249	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.253	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.247	(ocf:heartbeat:IPaddr2):	 Stopped
     * ip-192.168.122.248	(ocf:heartbeat:IPaddr2):	 Stopped
     * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
       * haproxy-bundle-docker-0	(ocf:heartbeat:docker):	 Stopped
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
       * openstack-cinder-volume-docker-0	(ocf:heartbeat:docker):	 Stopped
diff --git a/cts/scheduler/summary/promoted-with-blocked.summary b/cts/scheduler/summary/promoted-with-blocked.summary
index f045b61dfe..82177a9a6a 100644
--- a/cts/scheduler/summary/promoted-with-blocked.summary
+++ b/cts/scheduler/summary/promoted-with-blocked.summary
@@ -1,59 +1,59 @@
 1 of 8 resource instances DISABLED and 0 BLOCKED from further action due to failure
 
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
   * Full List of Resources:
     * Fencing	(stonith:fence_xvm):	 Started node1
     * rsc1	(ocf:pacemaker:Dummy):	 Stopped
     * Clone Set: rsc2-clone [rsc2] (promotable):
       * Stopped: [ node1 node2 node3 node4 node5 ]
     * rsc3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 
 Transition Summary:
-  * Start      rsc1       (                   node2 )  due to unrunnable rsc3 start (blocked)
-  * Start      rsc2:0     (                   node3 )
-  * Start      rsc2:1     (                   node4 )
-  * Start      rsc2:2     (                   node5 )
-  * Start      rsc2:3     (                   node1 )
+  * Start      rsc1       (                     node2 )  due to unrunnable rsc3 start (blocked)
+  * Start      rsc2:0     (                     node3 )
+  * Start      rsc2:1     (                     node4 )
+  * Start      rsc2:2     (                     node5 )
+  * Start      rsc2:3     (                     node1 )
   * Promote    rsc2:4     ( Stopped -> Promoted node2 )  due to colocation with rsc1 (blocked)
 
 Executing Cluster Transition:
   * Resource action: rsc1            monitor on node5
   * Resource action: rsc1            monitor on node4
   * Resource action: rsc1            monitor on node3
   * Resource action: rsc1            monitor on node2
   * Resource action: rsc1            monitor on node1
   * Resource action: rsc2:0          monitor on node3
   * Resource action: rsc2:1          monitor on node4
   * Resource action: rsc2:2          monitor on node5
   * Resource action: rsc2:3          monitor on node1
   * Resource action: rsc2:4          monitor on node2
   * Pseudo action:   rsc2-clone_start_0
   * Resource action: rsc3            monitor on node5
   * Resource action: rsc3            monitor on node4
   * Resource action: rsc3            monitor on node3
   * Resource action: rsc3            monitor on node2
   * Resource action: rsc3            monitor on node1
   * Resource action: rsc2:0          start on node3
   * Resource action: rsc2:1          start on node4
   * Resource action: rsc2:2          start on node5
   * Resource action: rsc2:3          start on node1
   * Resource action: rsc2:4          start on node2
   * Pseudo action:   rsc2-clone_running_0
   * Resource action: rsc2:0          monitor=10000 on node3
   * Resource action: rsc2:1          monitor=10000 on node4
   * Resource action: rsc2:2          monitor=10000 on node5
   * Resource action: rsc2:3          monitor=10000 on node1
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 node3 node4 node5 ]
 
   * Full List of Resources:
     * Fencing	(stonith:fence_xvm):	 Started node1
     * rsc1	(ocf:pacemaker:Dummy):	 Stopped
     * Clone Set: rsc2-clone [rsc2] (promotable):
       * Unpromoted: [ node1 node2 node3 node4 node5 ]
     * rsc3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
diff --git a/include/crm/common/output.h b/include/crm/common/output.h
index e629691598..112ebcbd0f 100644
--- a/include/crm/common/output.h
+++ b/include/crm/common/output.h
@@ -1,81 +1,83 @@
 /*
- * Copyright 2021 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_COMMON_OUTPUT__H
 #  define PCMK__CRM_COMMON_OUTPUT__H
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /**
  * \file
  * \brief Control output from tools
  * \ingroup core
  */
 
 /*!
  * \brief Control which sections are output
  */
 typedef enum {
     pcmk_section_stack         = 1 << 0,
     pcmk_section_dc            = 1 << 1,
     pcmk_section_times         = 1 << 2,
     pcmk_section_counts        = 1 << 3,
     pcmk_section_options       = 1 << 4,
     pcmk_section_nodes         = 1 << 5,
     pcmk_section_resources     = 1 << 6,
     pcmk_section_attributes    = 1 << 7,
     pcmk_section_failcounts    = 1 << 8,
     pcmk_section_operations    = 1 << 9,
     pcmk_section_fence_failed  = 1 << 10,
     pcmk_section_fence_pending = 1 << 11,
     pcmk_section_fence_worked  = 1 << 12,
     pcmk_section_tickets       = 1 << 13,
     pcmk_section_bans          = 1 << 14,
     pcmk_section_failures      = 1 << 15,
     pcmk_section_maint_mode    = 1 << 16,
 } pcmk_section_e;
 
 #define pcmk_section_fencing_all    (pcmk_section_fence_failed | pcmk_section_fence_pending | pcmk_section_fence_worked)
 #define pcmk_section_summary        (pcmk_section_stack | pcmk_section_dc | pcmk_section_times | \
                                      pcmk_section_counts | pcmk_section_maint_mode)
 #define pcmk_section_all            (pcmk_section_summary | pcmk_section_options | pcmk_section_nodes | \
                                      pcmk_section_resources | pcmk_section_attributes | pcmk_section_failcounts | \
                                      pcmk_section_operations | pcmk_section_fencing_all | pcmk_section_tickets | \
                                      pcmk_section_bans | pcmk_section_failures | pcmk_section_maint_mode)
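 
 /* Illustrative sketch: the section values are individual bits in a mask, so
  * callers can combine them to limit output to particular sections, for
  * example a summary plus the node and resource lists:
  *
  * \code
  * uint32_t sections = pcmk_section_summary
  *                     | pcmk_section_nodes
  *                     | pcmk_section_resources;
  * \endcode
  */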
 
 /*!
  * \brief Further modify the output of sections
  */
 typedef enum {
     pcmk_show_brief         = 1 << 0,
     pcmk_show_clone_detail  = 1 << 1,
     pcmk_show_node_id       = 1 << 2,
     pcmk_show_implicit_rscs = 1 << 3,
     pcmk_show_timing        = 1 << 4,
     pcmk_show_inactive_rscs = 1 << 5,
     pcmk_show_rscs_by_node  = 1 << 6,
     pcmk_show_pending       = 1 << 7,
     pcmk_show_rsc_only      = 1 << 8,
     pcmk_show_failed_detail = 1 << 9,
     pcmk_show_feature_set   = 1 << 10,
+    pcmk_show_description   = 1 << 11,
 } pcmk_show_opt_e;
 
-#define pcmk_show_details   (pcmk_show_clone_detail     \
-                             | pcmk_show_node_id        \
-                             | pcmk_show_implicit_rscs  \
-                             | pcmk_show_failed_detail  \
-                             | pcmk_show_feature_set)
+#define pcmk_show_details   ((pcmk_show_clone_detail)     \
+                             | (pcmk_show_node_id)        \
+                             | (pcmk_show_implicit_rscs)  \
+                             | (pcmk_show_failed_detail)  \
+                             | (pcmk_show_feature_set)    \
+                             | (pcmk_show_description))
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_COMMON_OUTPUT__H
diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h
index f6f33f5f03..78665408a0 100644
--- a/include/crm/common/output_internal.h
+++ b/include/crm/common/output_internal.h
@@ -1,948 +1,948 @@
 /*
  * Copyright 2019-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__OUTPUT_INTERNAL__H
 #  define PCMK__OUTPUT_INTERNAL__H
 
 #  include <stdbool.h>
 #  include <stdio.h>
 #  include <libxml/tree.h>
 #  include <libxml/HTMLtree.h>
 
 #  include <glib.h>
 #  include <crm/common/results.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /**
  * \file
  * \brief Formatted output for pacemaker tools
  */
 
 
-#  define PCMK__API_VERSION "2.28"
+#  define PCMK__API_VERSION "2.29"
 
 #if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS)
 #  define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS)))
 #else
 #  define PCMK__OUTPUT_ARGS(ARGS...)
 #endif
 
 typedef struct pcmk__output_s pcmk__output_t;
 
 /*!
  * \internal
  * \brief The type of a function that creates a ::pcmk__output_t.
  *
  * Instances of this type are passed to pcmk__register_format(), stored in an
  * internal data structure, and later accessed by pcmk__output_new().  For 
  * examples, see pcmk__mk_xml_output() and pcmk__mk_text_output().
  *
  * \param[in] argv The list of command line arguments.
  */
 typedef pcmk__output_t * (*pcmk__output_factory_t)(char **argv);
 
 /*!
  * \internal
  * \brief The type of a custom message formatting function.
  *
  * These functions are defined by various libraries to support formatting of
  * types aside from the basic types provided by a ::pcmk__output_t.
  *
  * The meaning of the return value will be different for each message.
  * In general, however, 0 should be returned on success and a positive value
  * on error.
  *
  * \param[in,out] out   Output object to use to display message
  * \param[in,out] args  Message-specific arguments needed
  *
  * \note These functions must not call va_start or va_end - that is done
  *       automatically before the custom formatting function is called.
  */
 typedef int (*pcmk__message_fn_t)(pcmk__output_t *out, va_list args);
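 
 /* Illustrative sketch of a custom message function matching
  * ::pcmk__message_fn_t; the message arguments and names here are
  * hypothetical.
  *
  * \code
  * static int
  * thing_text(pcmk__output_t *out, va_list args)
  * {
  *     const char *name = va_arg(args, const char *);
  *
  *     return out->info(out, "Thing: %s", name);
  * }
  * \endcode
  */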
 
 /*!
  * \internal
  * \brief Internal type for tracking custom messages.
  *
  * Each library can register functions that format custom message types.  These
  * are commonly used to handle some library-specific type.  Registration is
  * done by first defining a table of ::pcmk__message_entry_t structures and
  * then passing that table to pcmk__register_messages().  Separate handlers
  * can be defined for the same message, but for different formats (xml vs.
  * text).  Unknown formats will be ignored.
  *
  * Additionally, a "default" value for fmt_name can be used.  In this case,
  * fn will be registered for all supported formats.  It is also possible to
  * register a default and then override that registration with a format-specific
  * function if necessary.
  *
  * \note The ::pcmk__message_entry_t table is processed in one pass, in order,
  * from top to bottom.  This means later entries with the same message_id will
  * override previous ones.  Thus, any default entry must come before any
  * format-specific entries for the same message_id.
  */
 typedef struct pcmk__message_entry_s {
     /*!
      * \brief The message to be handled.
      *
      * This must be the same ID that is passed to the message function of
      * a ::pcmk__output_t.  Unknown message IDs will be ignored.
      */
     const char *message_id;
 
     /*!
      * \brief The format type this handler is for.
      *
      * This name must match the fmt_name of the currently active formatter in
      * order for the registered function to be called.  It is valid to have
      * multiple entries for the same message_id but with different fmt_name
      * values.
      */
     const char *fmt_name;
 
     /*!
      * \brief The function to be called for message_id given a match on
      *        fmt_name.  See comments on ::pcmk__message_fn_t.
      */
     pcmk__message_fn_t fn;
 } pcmk__message_entry_t;
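 
 /* Illustrative sketch: a message table with a default handler followed by an
  * XML-specific override, registered in one call.  The message ID and
  * functions here are hypothetical.
  *
  * \code
  * static pcmk__message_entry_t fmt_functions[] = {
  *     { "thing", "default", thing_text },
  *     { "thing", "xml",     thing_xml },
  *     { NULL, NULL, NULL },
  * };
  *
  * pcmk__register_messages(out, fmt_functions);
  * \endcode
  */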
 
 /*!
  * \internal
  * \brief This structure contains everything needed to add support for a
  *        single output formatter to a command line program.
  */
 typedef struct pcmk__supported_format_s {
     /*!
      * \brief The name of this output formatter, which should match the
      *        fmt_name parameter in some ::pcmk__output_t structure.
      */
     const char *name;
 
     /*!
      * \brief A function that creates a ::pcmk__output_t.
      */
     pcmk__output_factory_t create;
 
     /*!
      * \brief Format-specific command line options.  This can be NULL if
      *        no command line options should be supported.
      */
     GOptionEntry *options;
 } pcmk__supported_format_t;
 
 /* The following three blocks need to be updated each time a new base formatter
  * is added.
  */
 
 extern GOptionEntry pcmk__html_output_entries[];
 extern GOptionEntry pcmk__log_output_entries[];
 extern GOptionEntry pcmk__none_output_entries[];
 extern GOptionEntry pcmk__text_output_entries[];
 extern GOptionEntry pcmk__xml_output_entries[];
 
 pcmk__output_t *pcmk__mk_html_output(char **argv);
 pcmk__output_t *pcmk__mk_log_output(char **argv);
 pcmk__output_t *pcmk__mk_none_output(char **argv);
 pcmk__output_t *pcmk__mk_text_output(char **argv);
 pcmk__output_t *pcmk__mk_xml_output(char **argv);
 
 #define PCMK__SUPPORTED_FORMAT_HTML { "html", pcmk__mk_html_output, pcmk__html_output_entries }
 #define PCMK__SUPPORTED_FORMAT_LOG  { "log", pcmk__mk_log_output, pcmk__log_output_entries }
 #define PCMK__SUPPORTED_FORMAT_NONE { PCMK__VALUE_NONE, pcmk__mk_none_output,   \
                                       pcmk__none_output_entries }
 #define PCMK__SUPPORTED_FORMAT_TEXT { "text", pcmk__mk_text_output, pcmk__text_output_entries }
 #define PCMK__SUPPORTED_FORMAT_XML  { "xml", pcmk__mk_xml_output, pcmk__xml_output_entries }
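 
 /* Illustrative sketch: a NULL-terminated table of supported formats, as later
  * passed to pcmk__register_formats().
  *
  * \code
  * static pcmk__supported_format_t formats[] = {
  *     PCMK__SUPPORTED_FORMAT_TEXT,
  *     PCMK__SUPPORTED_FORMAT_XML,
  *     { NULL, NULL, NULL },
  * };
  * \endcode
  */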
 
 /*!
  * \brief This structure contains everything that makes up a single output
  *        formatter.
  *
  * Instances of this structure may be created by calling pcmk__output_new()
  * with the name of the desired formatter.  They should later be freed with
  * pcmk__output_free().
  */
 struct pcmk__output_s {
     /*!
      * \brief The name of this output formatter.
      */
     const char *fmt_name;
 
     /*!
      * \brief Should this formatter suppress most output?
      *
      * \note This setting is not respected by all formatters.  In general,
      *       machine-readable output formats will not support this while
      *       user-oriented formats will.  Callers should use is_quiet()
      *       to test whether to print or not.
      */
     bool quiet;
 
     /*!
      * \brief A copy of the request that generated this output.
      *
      * In the case of command line usage, this would be the command line
      * arguments.  For other use cases, it could be different.
      */
     gchar *request;
 
     /*!
      * \brief Where output should be written.
      *
      * This could be a file handle, or stdout or stderr.  This is really only
      * useful internally.
      */
     FILE *dest;
 
     /*!
      * \brief Custom messages that are currently registered on this formatter.
      *
      * Keys are the string message IDs, values are ::pcmk__message_fn_t function
      * pointers.
      */
     GHashTable *messages;
 
     /*!
      * \brief Implementation-specific private data.
      *
      * Each individual formatter may have some private data useful in its
      * implementation.  This points to that data.  Callers should not rely on
      * its contents or structure.
      */
     void *priv;
 
     /*!
      * \internal
      * \brief Take whatever actions are necessary to prepare out for use.  This is
      *        called by pcmk__output_new().  End users should not need to call this.
      *
      * \note For formatted output implementers - This function should be written in
      *       such a way that it can be called repeatedly on an already initialized
      *       object without causing problems, or on a previously finished object
      *       without crashing.
      *
      * \param[in,out] out The output functions structure.
      *
      * \return true on success, false on error.
      */
     bool (*init) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Free the private formatter-specific data.
      *
      * This is called from pcmk__output_free() and does not typically need to be
      * called directly.
      *
      * \param[in,out] out The output functions structure.
      */
     void (*free_priv) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Take whatever actions are necessary to end formatted output.
      *
      * This could include flushing output to a file, but does not include freeing
      * anything.  The finish method can potentially be fairly complicated, adding
      * additional information to the internal data structures or doing whatever
      * else.  It is therefore suggested that finish only be called once.
      *
      * \note The print parameter will only affect those formatters that do all
      *       their output at the end.  Console-oriented formatters typically print
      *       a line at a time as they go, so this parameter will not affect them.
      *       Structured formatters will honor it, however.
      *
      * \note The copy_dest parameter does not apply to all formatters.  Console-
      *       oriented formatters do not build up a structure as they go, and thus
      *       do not have anything to return.  Structured formatters will honor it,
      *       however.  Note that each type of formatter will return a different
      *       type of value in this parameter.  To use this parameter, call this
      *       function like so:
      *
      * \code
      * xmlNode *dest = NULL;
      * out->finish(out, exit_code, false, (void **) &dest);
      * \endcode
      *
      * \param[in,out] out         The output functions structure.
      * \param[in]     exit_status The exit value of the whole program.
      * \param[in]     print       Whether this function should write any output.
      * \param[out]    copy_dest   A destination to store a copy of the internal
      *                            data structure for this output, or NULL if no
      *                            copy is required.  The caller should free this
      *                            memory when done with it.
      */
     void (*finish) (pcmk__output_t *out, crm_exit_t exit_status, bool print,
                     void **copy_dest);
 
     /*!
      * \internal
      * \brief Finalize output and then immediately set back up to start a new set
      *        of output.
      *
      * This is conceptually the same as calling finish and then init, though in
      * practice more may be happening behind the scenes.
      *
      * \note This function differs from finish in that no exit_status is added.
      *       The idea is that the program is not shutting down, so there is not
      *       yet a final exit code.  Call finish on the last time through if this
      *       is needed.
      *
      * \param[in,out] out The output functions structure.
      */
     void (*reset) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Register a custom message.
      *
      * \param[in,out] out        The output functions structure.
      * \param[in]     message_id The name of the message to register.  This name
      *                           will be used as the message_id parameter to the
      *                           message function in order to call the custom
      *                           format function.
      * \param[in]     fn         The custom format function to call for message_id.
      */
     void (*register_message) (pcmk__output_t *out, const char *message_id,
                               pcmk__message_fn_t fn);
 
     /*!
      * \internal
      * \brief Call a previously registered custom message.
      *
      * \param[in,out] out        The output functions structure.
      * \param[in]     message_id The name of the message to call.  This name must
      *                           be the same as the message_id parameter of some
      *                           previous call to register_message.
      * \param[in] ...            Arguments to be passed to the registered function.
      *
      * \return A standard Pacemaker return code.  Generally: 0 if a function was
      *         registered for the message, that function was called, and returned
      *         successfully; EINVAL if no function was registered; or pcmk_rc_no_output
      *         if a function was called but produced no output.
      */
     int (*message) (pcmk__output_t *out, const char *message_id, ...);
 
     /*!
      * \internal
      * \brief Format the output of a completed subprocess.
      *
      * \param[in,out] out         The output functions structure.
      * \param[in]     exit_status The exit value of the subprocess.
      * \param[in]     proc_stdout stdout from the completed subprocess.
      * \param[in]     proc_stderr stderr from the completed subprocess.
      */
     void (*subprocess_output) (pcmk__output_t *out, int exit_status,
                                const char *proc_stdout, const char *proc_stderr);
 
     /*!
      * \internal
      * \brief Format version information.  This is useful for the --version
      *        argument of command line tools.
      *
      * \param[in,out] out      The output functions structure.
      * \param[in]     extended Add additional version information.
      */
     void (*version) (pcmk__output_t *out, bool extended);
 
     /*!
      * \internal
      * \brief Format an informational message that should be shown to
      *        an interactive user.  Not all formatters will do this.
      *
      * \note A newline will automatically be added to the end of the format
      *       string, so callers should not include a newline.
      *
      * \note It is possible for a formatter that supports this method to
      *       still not print anything out if is_quiet returns true.
      *
      * \param[in,out] out The output functions structure.
      * \param[in]     format The format string of the message to be printed.
      * \param[in]     ... Arguments to be formatted.
      *
      * \return A standard Pacemaker return code.  Generally: pcmk_rc_ok
      *         if output was produced and pcmk_rc_no_output if it was not.
      *         As not all formatters implement this function, those that
      *         do not will always just return pcmk_rc_no_output.
      */
     int (*info) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
 
     /*!
      * \internal
      * \brief Like \p info() but for messages that should appear only
      *        transiently. Not all formatters will do this.
      *
      * The originally envisioned use case is for console output, where a
      * transient status-related message may be quickly overwritten by a refresh.
      *
      * \param[in,out] out     The output functions structure.
      * \param[in]     format  The format string of the message to be printed.
      * \param[in]     ...     Arguments to be formatted.
      *
      * \return A standard Pacemaker return code. Generally: \p pcmk_rc_ok if
      *         output was produced and \p pcmk_rc_no_output if it was not. As
      *         not all formatters implement this function, those that do not
      *         will always just return \p pcmk_rc_no_output.
      */
     int (*transient) (pcmk__output_t *out, const char *format, ...)
         G_GNUC_PRINTF(2, 3);
 
     /*!
      * \internal
      * \brief Format an error message that should be shown to an interactive
      *        user.  Not all formatters will do this.
      *
      * \note A newline will automatically be added to the end of the format
      *       string, so callers should not include a newline.
      *
      * \note Formatters that support this method should always generate output,
      *       even if is_quiet returns true.
      *
      * \param[in,out] out The output functions structure.
      * \param[in]     format The format string of the message to be printed.
      * \param[in]     ... Arguments to be formatted.
      */
     void (*err) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
 
     /*!
      * \internal
      * \brief Format already formatted XML.
      *
      * \param[in,out] out  The output functions structure.
      * \param[in]     name A name to associate with the XML.
      * \param[in]     buf  The XML in a string.
      */
     void (*output_xml) (pcmk__output_t *out, const char *name, const char *buf);
 
     /*!
      * \internal
      * \brief Start a new list of items.
      *
      * \note For text output, this corresponds to another level of indentation.  For
      *       XML output, this corresponds to wrapping any following output in another
      *       layer of tags.
      *
      * \note If singular_noun and plural_noun are non-NULL, calling end_list will
      *       result in a summary being added.
      *
      * \param[in,out] out           The output functions structure.
      * \param[in]     singular_noun When outputting the summary for a list with
      *                              one item, the noun to use.
      * \param[in]     plural_noun   When outputting the summary for a list with
      *                              more than one item, the noun to use.
      * \param[in]     format        The format string.
      * \param[in]     ...           Arguments to be formatted.
      */
     void (*begin_list) (pcmk__output_t *out, const char *singular_noun,
                         const char *plural_noun, const char *format, ...)
                         G_GNUC_PRINTF(4, 5);
 
     /*!
      * \internal
      * \brief Format a single item in a list.
      *
      * \param[in,out] out     The output functions structure.
      * \param[in]     name    A name to associate with this item.
      * \param[in]     format  The format string.
      * \param[in]     ...     Arguments to be formatted.
      */
     void (*list_item) (pcmk__output_t *out, const char *name, const char *format, ...)
                       G_GNUC_PRINTF(3, 4);
 
     /*!
      * \internal
      * \brief Increment the internal counter of the current list's length.
      *
      * Typically, this counter is maintained behind the scenes as a side effect
      * of calling list_item().  However, custom functions that maintain lists
      * in some other way will need to manage this counter manually.  This is
      * useful for implementing custom message functions and should not be
      * needed otherwise.
      *
      * \param[in,out] out The output functions structure.
      */
     void (*increment_list) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Conclude a list.
      *
      * \note If begin_list was called with non-NULL for both the singular_noun
      *       and plural_noun arguments, this function will output a summary.
      *       Otherwise, no summary will be added.
      *
      * \param[in,out] out The output functions structure.
      */
     void (*end_list) (pcmk__output_t *out);
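 
     /* Illustrative sketch: how the list methods are typically combined.
      * Because non-NULL nouns are given to begin_list(), end_list() will
      * append a summary.
      *
      * \code
      * out->begin_list(out, "resource", "resources", "Resource List");
      * out->list_item(out, NULL, "%s", "rsc1");
      * out->list_item(out, NULL, "%s", "rsc2");
      * out->end_list(out);
      * \endcode
      */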
 
     /*!
      * \internal
      * \brief Should anything be printed to the user?
      *
      * \note This takes into account both the \p quiet value as well as the
      *       current formatter.
      *
      * \param[in,out] out The output functions structure.
      *
      * \return true if output should be suppressed, false otherwise.
      */
     bool (*is_quiet) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Output a spacer.  Not all formatters will do this.
      *
      * \param[in,out] out The output functions structure.
      */
     void (*spacer) (pcmk__output_t *out);
 
     /*!
      * \internal
      * \brief Output a progress indicator.  This is likely only useful for
      *        plain text, console-based formatters.
      *
      * \param[in,out] out  The output functions structure
      * \param[in]     end  If true, output a newline afterwards (this should
      *                     only be used the last time this function is called)
      *
      */
     void (*progress) (pcmk__output_t *out, bool end);
 
     /*!
      * \internal
      * \brief Prompt the user for input.  Not all formatters will do this.
      *
      * \note This function is part of pcmk__output_t, but unlike all the other
      *       functions, it does not take one as an argument.  In general, a
      *       prompt will go directly to the screen and therefore bypass any
      *       need to use the formatted output code to decide where and how
      *       to display.
      *
      * \param[in]  prompt The prompt to display.  This is required.
      * \param[in]  echo   If true, echo the user's input to the screen.  Set
      *                    to false for password entry.
      * \param[out] dest   Where to store the user's response.  This is
      *                    required.
      */
     void (*prompt) (const char *prompt, bool echo, char **dest);
 };
 
 /*!
  * \internal
  * \brief Call a formatting function for a previously registered message.
  *
  * \note This function is for implementing custom formatters.  It should not
  *       be called directly.  Instead, call out->message.
  *
  * \param[in,out] out        The output functions structure.
  * \param[in]     message_id The message to be handled.  Unknown messages
  *                           will be ignored.
  * \param[in]     ...        Arguments to be passed to the registered function.
  */
 int
 pcmk__call_message(pcmk__output_t *out, const char *message_id, ...);
 
 /*!
  * \internal
  * \brief Free a ::pcmk__output_t structure that was previously created by
  *        pcmk__output_new().
  *
  * \note While the create and finish functions are designed in such a way that
  *       they can be called repeatedly, this function will completely free the
  *       memory of the object.  Once this function has been called, producing
  *       more output requires starting over from pcmk__output_new().
  *
  * \param[in,out] out         The output structure.
  */
 void pcmk__output_free(pcmk__output_t *out);
 
 /*!
  * \internal
  * \brief Create a new ::pcmk__output_t structure.
  *
  * \param[in,out] out      The destination of the new ::pcmk__output_t.
  * \param[in]     fmt_name How should output be formatted?
  * \param[in]     filename Where should formatted output be written to?  This
  *                         can be a filename (which will be overwritten if it
  *                         already exists), or NULL or "-" for stdout.  For no
  *                         output, pass a filename of "/dev/null".
  * \param[in]     argv     The list of command line arguments.
  *
  * \return Standard Pacemaker return code
  */
 int pcmk__output_new(pcmk__output_t **out, const char *fmt_name,
                      const char *filename, char **argv);
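 
 /* Illustrative sketch: typical lifecycle of an output object in a command
  * line tool, assuming the "text" formatter has already been registered with
  * pcmk__register_formats().
  *
  * \code
  * pcmk__output_t *out = NULL;
  * int rc = pcmk__output_new(&out, "text", NULL, argv);
  *
  * if (rc == pcmk_rc_ok) {
  *     out->info(out, "hello from the %s formatter", out->fmt_name);
  *     out->finish(out, CRM_EX_OK, true, NULL);
  *     pcmk__output_free(out);
  * }
  * \endcode
  */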
 
 /*!
  * \internal
  * \brief Register a new output formatter, making it available for use
  *        the same as a base formatter.
  *
  * \param[in,out] group   A ::GOptionGroup that formatted output related command
  *                        line arguments should be added to.  This can be NULL
  *                        for use outside of command line programs.
  * \param[in]     name    The name of the format.  This will be used to select a
  *                        format from command line options and for displaying help.
  * \param[in]     create  A function that creates a ::pcmk__output_t.
  * \param[in]     options Format-specific command line options.  These will be
  *                        added to the context.  This argument can also be NULL.
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk__register_format(GOptionGroup *group, const char *name,
                       pcmk__output_factory_t create,
                       const GOptionEntry *options);
 
 /*!
  * \internal
  * \brief Register an entire table of output formatters at once.
  *
  * \param[in,out] group A ::GOptionGroup that formatted output related command
  *                      line arguments should be added to.  This can be NULL
  *                      for use outside of command line programs.
  * \param[in]     table An array of ::pcmk__supported_format_t which should
  *                      all be registered.  This array must be NULL-terminated.
  *
  */
 void
 pcmk__register_formats(GOptionGroup *group,
                        const pcmk__supported_format_t *table);
 
 /*!
  * \internal
  * \brief Unregister a previously registered table of custom formatting
  *        functions and destroy the internal data structures associated with them.
  */
 void
 pcmk__unregister_formats(void);
 
 /*!
  * \internal
  * \brief Register a function to handle a custom message.
  *
  * \note This function is for implementing custom formatters.  It should not
  *       be called directly.  Instead, call out->register_message.
  *
  * \param[in,out] out        The output functions structure.
  * \param[in]     message_id The message to be handled.
  * \param[in]     fn         The custom format function to call for message_id.
  */
 void
 pcmk__register_message(pcmk__output_t *out, const char *message_id,
                        pcmk__message_fn_t fn);
 
 /*!
  * \internal
  * \brief Register an entire table of custom formatting functions at once.
  *
  * This table can contain multiple formatting functions for the same message ID
  * if they are for different format types.
  *
  * \param[in,out] out   The output functions structure.
  * \param[in]     table An array of ::pcmk__message_entry_t values which should
  *                      all be registered.  This array must be NULL-terminated.
  */
 void
 pcmk__register_messages(pcmk__output_t *out,
                         const pcmk__message_entry_t *table);
 
 /* Functions that are useful for implementing custom message formatters */
 
 /*!
  * \internal
  * \brief A printf-like function.
  *
  * This function writes to out->dest and indents the text to the current level
  * of the text formatter's nesting.  This function should be used when implementing
  * custom message functions for the text output format.  It should not be used
  * for any other purpose.
  *
  * Typically, this function should be used instead of printf.
  *
  * \param[in,out] out    The output functions structure.
  * \param[in]     format The format string.
  * \param[in]     ...    Arguments to be passed to the format string.
  */
 void
 pcmk__indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
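 
 /* Illustrative sketch: inside a text-format message function, the indented
  * variant keeps output aligned with the current list nesting.
  *
  * \code
  * pcmk__indented_printf(out, "%s: %d\n", "count", 2);
  * \endcode
  */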
 
 /*!
  * \internal
  * \brief A vprintf-like function.
  *
  * This function is like pcmk__indented_printf(), except it takes a va_list instead
  * of a list of arguments.  This function should be used when implementing custom
  * functions for the text output format.  It should not be used for any other purpose.
  *
  * Typically, this function should be used instead of vprintf.
  *
  * \param[in,out] out    The output functions structure.
  * \param[in]     format The format string.
  * \param[in]     args   A list of arguments to apply to the format string.
  */
 void
 pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
 
 
 /*!
  * \internal
  * \brief A printf-like function.
  *
  * This function writes to out->dest without indenting the text.  This function
  * should be used when implementing custom message functions for the text output
  * format.  It should not be used for any other purpose.
  *
  * \param[in,out] out    The output functions structure.
  * \param[in]     format The format string.
  * \param[in]     ...    Arguments to be passed to the format string.
  */
 void
 pcmk__formatted_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
 
 /*!
  * \internal
  * \brief A vprintf-like function.
  *
  * This function is like pcmk__formatted_printf(), except it takes a va_list instead
  * of a list of arguments.  This function should be used when implementing custom
  * message functions for the text output format.  It should not be used for any
  * other purpose.
  *
  * \param[in,out] out    The output functions structure.
  * \param[in]     format The format string.
  * \param[in]     args   A list of arguments to apply to the format string.
  */
 void
 pcmk__formatted_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
 
 /*!
  * \internal
  * \brief Prompt the user for input.
  *
  * \param[in]  prompt The prompt to display
  * \param[in]  echo   If true, echo the user's input to the screen.  Set
  *                    to false for password entry.
  * \param[out] dest   Where to store the user's response.
  */
 void
 pcmk__text_prompt(const char *prompt, bool echo, char **dest);
 
 /*!
  * \internal
  * \brief Get the log level used by the formatted output logger
  *
  * \param[in] out  Output object
  *
  * \return Log level used by \p out
  */
 int
 pcmk__output_get_log_level(const pcmk__output_t *out);
 
 /*!
  * \internal
  * \brief Set the log level used by the formatted output logger.
  *
  * \param[in,out] out       The output functions structure.
  * \param[in]     log_level The log level constant (LOG_INFO, LOG_ERR, etc.)
  *                          to use.
  *
  * \note By default, LOG_INFO is used.
  * \note Almost all formatted output messages will respect this setting.
  *       However, out->err will always log at LOG_ERR.
  */
 void
 pcmk__output_set_log_level(pcmk__output_t *out, int log_level);
 
 /*!
  * \internal
  * \brief Create and return a new XML node with the given name, as a child of the
  *        current list parent.  The new node is then added as the new list parent,
  *        meaning all subsequent nodes will be its children.  This is used when
  *        implementing custom functions.
  *
  * \param[in,out] out  The output functions structure.
  * \param[in]     name The name of the node to be created.
  * \param[in]     ...     Name/value pairs to set as XML properties.
  */
 xmlNodePtr
 pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...)
 G_GNUC_NULL_TERMINATED;
 
 /*!
  * \internal
  * \brief Add a copy of the given node as a child of the current list parent.
  *        This is used when implementing custom message functions.
  *
  * \param[in,out] out  The output functions structure.
  * \param[in]     node An XML node to copy as a child.
  */
 void
 pcmk__output_xml_add_node_copy(pcmk__output_t *out, xmlNodePtr node);
 
 /*!
  * \internal
  * \brief Create and return a new XML node with the given name, as a child of the
  *        current list parent.  This is used when implementing custom functions.
  *
  * \param[in,out] out  The output functions structure.
  * \param[in]     name The name of the node to be created.
  * \param[in]     ...     Name/value pairs to set as XML properties.
  */
 xmlNodePtr
 pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...)
 G_GNUC_NULL_TERMINATED;
 
 /*!
  * \internal
  * \brief Like pcmk__output_create_xml_node(), but add the given text content to the
  *        new node.
  *
  * \param[in,out] out     The output functions structure.
  * \param[in]     name    The name of the node to be created.
  * \param[in]     content The text content of the node.
  */
 xmlNodePtr
 pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content);
 
 /*!
  * \internal
  * \brief Push a parent XML node onto the stack.  This is used when implementing
  *        custom message functions.
  *
  * The XML output formatter maintains an internal stack to keep track of which nodes
  * are parents in order to build up the tree structure.  This function can be used
  * to temporarily push a new node onto the stack.  After calling this function, any
  * other formatting functions will have their nodes added as children of this new
  * parent.
  *
  * \param[in,out] out     The output functions structure
  * \param[in]     parent  XML node to add
  */
 void
 pcmk__output_xml_push_parent(pcmk__output_t *out, xmlNodePtr parent);
 
 /*!
  * \internal
  * \brief Pop a parent XML node off the stack.  This is used when implementing
  *        custom message functions.
  *
  * This function removes a parent node from the stack.  See
  * pcmk__output_xml_push_parent() for more details.
  *
  * \note Little checking is done with this function.  Be sure you only pop parents
  * that were previously pushed.  In general, it is best to keep the code between
  * push and pop simple.
  *
  * \param[in,out] out The output functions structure.
  */
 void
 pcmk__output_xml_pop_parent(pcmk__output_t *out);
 
 /*!
  * \internal
  * \brief Peek at the parent XML node on top of the stack.  This is used when
  *        implementing custom message functions.
  *
  * This function returns the current parent node without removing it from the
  * stack.  See pcmk__output_xml_push_parent() for more details.  It has no
  * side effects and can be called on an empty stack.
  *
  * \note Little checking is done with this function.
  *
  * \param[in,out] out The output functions structure.
  *
  * \return NULL if the stack is empty, otherwise the current parent node.
  */
 xmlNodePtr
 pcmk__output_xml_peek_parent(pcmk__output_t *out);
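 
 /* Illustrative sketch: building nested XML from a custom message function.
  * The element and attribute names here are hypothetical.
  *
  * \code
  * pcmk__output_xml_create_parent(out, "things", "count", "2", NULL);
  * pcmk__output_create_xml_text_node(out, "thing", "first");
  * pcmk__output_create_xml_text_node(out, "thing", "second");
  * pcmk__output_xml_pop_parent(out);
  * \endcode
  */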
 
 /*!
  * \internal
  * \brief Create a new XML node consisting of the provided text inside an HTML
  *        element node of the given name.
  *
  * \param[in,out] out          The output functions structure.
  * \param[in]     element_name The name of the new HTML element.
  * \param[in]     id           The CSS ID selector to apply to this element.
  *                             If NULL, no ID is added.
  * \param[in]     class_name   The CSS class selector to apply to this element.
  *                             If NULL, no class is added.
  * \param[in]     text         The text content of the node.
  */
 xmlNodePtr
 pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, const char *id,
                               const char *class_name, const char *text);
 
 /*!
  * \internal
  * \brief Add an HTML tag to the <head> section.
  *
  * The arguments after name are a NULL-terminated list of keys and values,
  * all of which will be added as attributes to the given tag.  For instance,
  * the following code would generate the tag "<meta http-equiv='refresh' content='19'>":
  *
  * \code
  * pcmk__html_add_header("meta", "http-equiv", "refresh", "content", "19", NULL);
  * \endcode
  *
  * \param[in]     name   The HTML tag for the new node.
  * \param[in]     ...    A NULL-terminated key/value list of attributes.
  */
 void
 pcmk__html_add_header(const char *name, ...)
 G_GNUC_NULL_TERMINATED;
 
 /*!
  * \internal
  * \brief Handle end-of-program error reporting
  *
  * \param[in,out] error A GError object potentially containing some error.
  *                      If NULL, do nothing.
  * \param[in,out] out   The output functions structure.  If NULL, any errors
  *                      will simply be printed to stderr.
  */
 void pcmk__output_and_clear_error(GError *error, pcmk__output_t *out);
 
 int pcmk__xml_output_new(pcmk__output_t **out, xmlNodePtr *xml);
 void pcmk__xml_output_finish(pcmk__output_t *out, xmlNodePtr *xml);
 int pcmk__log_output_new(pcmk__output_t **out);
 int pcmk__text_output_new(pcmk__output_t **out, const char *filename);
 
 #if defined(PCMK__UNIT_TESTING)
 /* If we are building libcrmcommon_test.a, add this accessor function so we can
  * inspect the internal formatters hash table.
  */
 GHashTable *pcmk__output_formatters(void);
 #endif
 
 #define PCMK__OUTPUT_SPACER_IF(out_obj, cond)   \
     if (cond) {                                 \
         out_obj->spacer(out_obj);               \
     }
 
 #define PCMK__OUTPUT_LIST_HEADER(out_obj, cond, retcode, title...)  \
     if (retcode == pcmk_rc_no_output) {                             \
         PCMK__OUTPUT_SPACER_IF(out_obj, cond);                      \
         retcode = pcmk_rc_ok;                                       \
         out_obj->begin_list(out_obj, NULL, NULL, title);            \
     }
 
 #define PCMK__OUTPUT_LIST_FOOTER(out_obj, retcode)  \
     if (retcode == pcmk_rc_ok) {                    \
         out_obj->end_list(out_obj);                 \
     }
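 
 /* Illustrative sketch: the usual pattern for emitting an optional list
  * section from a message function; "items" here is a hypothetical GList of
  * strings.
  *
  * \code
  * int rc = pcmk_rc_no_output;
  *
  * for (const GList *iter = items; iter != NULL; iter = iter->next) {
  *     PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Items");
  *     out->list_item(out, NULL, "%s", (const char *) iter->data);
  * }
  * PCMK__OUTPUT_LIST_FOOTER(out, rc);
  * \endcode
  */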
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 0bd66db03a..7451aa5d58 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,707 +1,709 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 
 #  include <stdint.h>
 #  include <string.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/options_internal.h>
 #  include <crm/common/output_internal.h>
 
+const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
+
 enum pe__clone_flags {
     // Whether instances should be started sequentially
     pe__clone_ordered               = (1 << 0),
 
     // Whether promotion scores have been added
     pe__clone_promotion_added       = (1 << 1),
 
     // Whether promotion constraints have been added
     pe__clone_promotion_constrained = (1 << 2),
 };
 
 bool pe__clone_is_ordered(const pe_resource_t *clone);
 int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
 
 
 enum pe__group_flags {
     pe__group_ordered       = (1 << 0), // Members start sequentially
     pe__group_colocated     = (1 << 1), // Members must be on same node
 };
 
 bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
 pe_resource_t *pe__last_group_member(const pe_resource_t *group);
 
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) do {           \
         was_processing_error = TRUE;    \
         pcmk__config_err(fmt);          \
     } while (0)
 
 #  define pe_warn(fmt...) do {          \
         was_processing_warning = TRUE;  \
         pcmk__config_warn(fmt);         \
     } while (0)
 
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 
 #define pe__set_working_set_flags(working_set, flags_to_set) do {           \
         (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_set), #flags_to_set);           \
     } while (0)
 
 #define pe__clear_working_set_flags(working_set, flags_to_clear) do {       \
         (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,     \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_clear), #flags_to_clear);       \
     } while (0)
 
 #define pe__set_resource_flags(resource, flags_to_set) do {                 \
         (resource)->flags = pcmk__set_flags_as(__func__, __LINE__,          \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_set), #flags_to_set);                                 \
     } while (0)
 
 #define pe__clear_resource_flags(resource, flags_to_clear) do {             \
         (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__,        \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_clear), #flags_to_clear);                             \
     } while (0)
 
 #define pe__set_action_flags(action, flags_to_set) do {                     \
         (action)->flags = pcmk__set_flags_as(__func__, __LINE__,            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags(action, flags_to_clear) do {                 \
         (action)->flags = pcmk__clear_flags_as(__func__, __LINE__,          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
         action_flags = pcmk__set_flags_as(__func__, __LINE__,               \
                                           LOG_TRACE, "Action", action_name, \
                                           (action_flags),                   \
                                           (flags_to_set), #flags_to_set);   \
     } while (0)
 
 #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
         action_flags = pcmk__clear_flags_as(__func__, __LINE__,             \
                                             LOG_TRACE,                      \
                                             "Action", action_name,          \
                                             (action_flags),                 \
                                             (flags_to_clear),               \
                                             #flags_to_clear);               \
     } while (0)
 
 #define pe__set_action_flags_as(function, line, action, flags_to_set) do {  \
         (action)->flags = pcmk__set_flags_as((function), (line),            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
         (action)->flags = pcmk__clear_flags_as((function), (line),          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_order_flags(order_flags, flags_to_set) do {                 \
         order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                          "Ordering", "constraint",          \
                                          order_flags, (flags_to_set),       \
                                          #flags_to_set);                    \
     } while (0)
 
 #define pe__clear_order_flags(order_flags, flags_to_clear) do {               \
         order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Ordering", "constraint",          \
                                            order_flags, (flags_to_clear),     \
                                            #flags_to_clear);                  \
     } while (0)
 
 // Some warnings we don't want to print on every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = (1 << 0),
     pe_wo_restart_type  = (1 << 1),
     pe_wo_role_after    = (1 << 2),
     pe_wo_poweroff      = (1 << 3),
     pe_wo_require_all   = (1 << 4),
     pe_wo_order_score   = (1 << 5),
     pe_wo_neg_threshold = (1 << 6),
     pe_wo_remove_after  = (1 << 7),
     pe_wo_ping_node     = (1 << 8),
     pe_wo_order_inst    = (1 << 9),
     pe_wo_coloc_inst    = (1 << 10),
     pe_wo_group_order   = (1 << 11),
     pe_wo_group_coloc   = (1 << 12),
 };
 
 extern uint32_t pe_wo;
 
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (!pcmk_is_set(pe_wo, pe_wo_bit)) {  \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,       \
                                       "Warn-once", "logging", pe_wo,        \
                                       (pe_wo_bit), #pe_wo_bit);             \
         }                                       \
     } while (0)
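 
 /* Illustrative use of pe_warn_once() (the message text below is only an
  * example, not an actual caller):
  *
  *     pe_warn_once(pe_wo_ping_node,
  *                  "Node %s is only reachable via ping", node_name);
  *
  * The first such call logs the warning and records pe_wo_ping_node in pe_wo,
  * so later calls with the same bit are not logged again.
  */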
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GList *node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
     int id;
     uint32_t flags; // Group of enum pe_ordering flags
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 typedef struct notify_data_s {
     GSList *keys;               // Environment variable name/value pairs
 
     const char *action;
 
     pe_action_t *pre;
     pe_action_t *post;
     pe_action_t *pre_done;
     pe_action_t *post_done;
 
     GList *active;            /* notify_entry_t*  */
     GList *inactive;          /* notify_entry_t*  */
     GList *start;             /* notify_entry_t*  */
     GList *stop;              /* notify_entry_t*  */
     GList *demote;            /* notify_entry_t*  */
     GList *promote;           /* notify_entry_t*  */
     GList *promoted;          /* notify_entry_t*  */
     GList *unpromoted;        /* notify_entry_t*  */
     GHashTable *allowed_nodes;
 
 } notify_data_t;
 
 const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
                                             bool include_bundle);
 
 int pe__clone_max(const pe_resource_t *clone);
 int pe__clone_promoted_max(const pe_resource_t *clone);
 int pe__clone_promoted_node_max(const pe_resource_t *clone);
 
 pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
                                        bool optional, bool runnable);
 
 void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
                                       bool any_demoting);
 
 bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
 
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(pcmk__output_t *out);
 void verify_pe_options(GHashTable * options);
 
 void common_update_score(pe_resource_t * rsc, const char *id, int score);
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 //! \deprecated This function will be removed in a future release
 void native_print(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void group_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
                                   const pe_node_t *node, uint32_t show_opts,
                                   const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                              const char *tag_name, size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 
 // Clone notifications (pe_notif.c)
 void pe__create_notifications(pe_resource_t *rsc, notify_data_t *n_data);
 notify_data_t *pe__clone_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
                                           pe_action_t *action,
                                           pe_action_t *complete);
 void pe__free_notification_data(notify_data_t *n_data);
 void pe__order_notifs_after_fencing(const pe_action_t *action,
                                     pe_resource_t *rsc,
                                     pe_action_t *stonith_op);
 
 
 static inline const char *
 pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
 {
     return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
 }
 
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_default(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_default(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = (1 << 0),
     pe_fc_effective = (1 << 1), // don't count expired failures
     pe_fc_fillers   = (1 << 2), // if container, include filler failures in count
 };
 
 int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
                      time_t *last_failure, uint32_t flags,
                      const xmlNode *xml_op);
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
                               unsigned int *count_all,
                               unsigned int *count_clean);
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return pe__find_active_on(rsc, NULL, NULL);
 }
 
 
 /* Binary-like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(const GList *list);
 
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 void pe__show_node_weights_as(const char *file, const char *function,
                               int line, bool to_log, const pe_resource_t *rsc,
                               const char *comment, GHashTable *nodes,
                               pe_working_set_t *data_set);
 
 #define pe__show_node_weights(level, rsc, text, nodes, data_set)    \
         pe__show_node_weights_as(__FILE__, __func__, __LINE__,      \
                                  (level), (rsc), (text), (nodes), (data_set))
 
 xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
 
 pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
                            const pe_node_t *on_node, gboolean optional,
                            gboolean save_action, pe_working_set_t *data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, rsc->cluster);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 pe_action_t *find_first_action(const GList *input, const char *uuid,
                                const char *task, const pe_node_t *on_node);
 
 enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
 
 extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
                        const char *tag, pe_working_set_t *data_set);
 
 extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
                            bool same_node_default);
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
 void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
                        const char *why);
 
 pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
                                    const char *sub_id);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(const pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return false;
 }
 
 int pe__target_rc_from_xml(const xmlNode *xml_op);
 
 gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
 bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed.  */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
                                          guint *interval_ms,
                                          const pe_node_t *node,
                                          const xmlNode *xml_op,
                                          GHashTable *overrides,
                                          bool calc_secure,
                                          pe_working_set_t *data_set);
 
 void pe__free_digests(gpointer ptr);
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
                                          const xmlNode *xml_op,
                                          pe_node_t *node,
                                          pe_working_set_t *data_set);
 
 pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
                          const char *reason, bool priority_delay,
                          pe_working_set_t *data_set);
 void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
                        const char *reason, pe_action_t *dependency,
                        pe_working_set_t *data_set);
 
 char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe__add_action_expected_result(pe_action_t *action, int expected_result);
 
 void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 //! \deprecated This function will be removed in a future release
 void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 
 //! \deprecated This function will be removed in a future release
 void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
                   const pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
 bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
                                        pe_working_set_t *data_set,
                                        xmlNode *xml, const char *field);
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);
 const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
 bool pe__is_universal_clone(const pe_resource_t *rsc,
                             const pe_working_set_t *data_set);
 void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
                          pe_node_t *node, enum pe_check_parameters,
                          pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*,
                                         const xmlNode*,
                                         enum pe_check_parameters));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(const pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  *
  * \param[in,out] out  Output object to register messages with
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                                 const pe_rule_eval_data_t *rule_data,
                                 GHashTable *hash, const char *always_first,
                                 gboolean overwrite, pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(const pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
                                         const pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
 GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
 GList *pe__filter_rsc_list(GList *rscs, GList *filter);
 GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
 GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
 
 bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
 
 gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 
 xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
 
 const char *pe__clone_child_id(const pe_resource_t *rsc);
 
 int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
 int pe__node_health(pe_node_t *node);
 
 static inline enum pcmk__health_strategy
 pe__health_strategy(pe_working_set_t *data_set)
 {
     return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
                                                PCMK__OPT_NODE_HEALTH_STRATEGY));
 }
 
 static inline int
 pe__health_score(const char *option, pe_working_set_t *data_set)
 {
     return char2score(pe_pref(data_set->config_hash, option));
 }
 
 /*!
  * \internal
  * \brief Return a string suitable for logging as a node name
  *
  * \param[in] node  Node to return a node name string for
  *
  * \return Node name if available, otherwise node ID if available,
  *         otherwise "unspecified node" if node is NULL or "unidentified node"
  *         if node has neither a name nor ID.
  */
 static inline const char *
 pe__node_name(const pe_node_t *node)
 {
     if (node == NULL) {
         return "unspecified node";
 
     } else if (node->details->uname != NULL) {
         return node->details->uname;
 
     } else if (node->details->id != NULL) {
         return node->details->id;
 
     } else {
         return "unidentified node";
     }
 }
 
 /*!
  * \internal
  * \brief Check whether two node objects refer to the same node
  *
  * \param[in] node1  First node object to compare
  * \param[in] node2  Second node object to compare
  *
  * \return true if \p node1 and \p node2 refer to the same node
  */
 static inline bool
 pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
 {
     return (node1 != NULL) && (node2 != NULL)
            && (node1->details == node2->details);
 }
 #endif
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 0f906ff53c..0ac62ead2f 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,1853 +1,1868 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include "./variant.h"
 
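 /*!
  * \internal
  * \brief Get the IPv4 address following a given one
  *
  * \param[in] last_ip  Most recently allocated address, as a dotted-quad string
  *
  * \return Newly allocated string containing the next address, or NULL if
  *         \p last_ip cannot be parsed as IPv4 or the usable range is exhausted
  */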
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
 
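 /*!
  * \internal
  * \brief Assign the next address in the bundle's IP range to a replica
  *
  * \param[in,out] data     Bundle variant data (ip_last is updated)
  * \param[in,out] replica  Replica to assign an address to
  * \param[in,out] buffer   Container run options to append the host mapping to
  */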
 static void
 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
             GString *buffer)
 {
     if(data->ip_range_start == NULL) {
         return;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 g_string_append_printf(buffer, " --add-host=%s-%d:%s",
                                        data->prefix, replica->offset,
                                        replica->ipaddr);
             } else {
                 g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                        replica->ipaddr, data->prefix,
                                        replica->offset);
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                    replica->ipaddr, data->prefix,
                                    replica->offset);
             break;
 
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
 }
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in,out] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
         }
         return TRUE;
     }
     return FALSE;
 }
 
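 /*!
  * \internal
  * \brief Create an implicit IPaddr2 resource for a bundle replica's address
  *
  * If the bundle uses an IP range, create an ocf:heartbeat:IPaddr2 resource
  * for the replica's address and add it as a child of the bundle.
  *
  * \param[in,out] parent   Bundle resource to add the new child to
  * \param[in]     data     Bundle variant data
  * \param[in,out] replica  Replica to create the IP resource for
  *
  * \return pcmk_rc_ok on success, pcmk_rc_unpack_error otherwise
  */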
 static int
 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                    pe__bundle_replica_t *replica)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (pe__unpack_resource(xml_ip, &replica->ip, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return pcmk_rc_ok;
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
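 /*!
  * \internal
  * \brief Create the implicit container resource for a bundle replica
  *
  * Build the agent-specific run options (hostname, environment, mounts, port
  * mappings, and so on) from the bundle configuration, then create and unpack
  * an ocf:heartbeat:<agent> resource as a child of the bundle.
  *
  * \param[in,out] parent   Bundle resource to add the new child to
  * \param[in]     data     Bundle variant data
  * \param[in,out] replica  Replica to create the container resource for
  *
  * \return pcmk_rc_ok on success, pcmk_rc_unpack_error otherwise
  */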
 static int
 create_container_resource(pe_resource_t *parent,
                           const pe__bundle_variant_data_t *data,
                           pe__bundle_replica_t *replica)
 {
     char *id = NULL;
     xmlNode *xml_container = NULL;
     xmlNode *xml_obj = NULL;
 
     // Agent-specific
     const char *hostname_opt = NULL;
     const char *env_opt = NULL;
     const char *agent_str = NULL;
     int volid = 0;  // rkt-only
 
     GString *buffer = NULL;
     GString *dbuffer = NULL;
 
     // Where syntax differences are drop-in replacements, set them now
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             hostname_opt = "-h ";
             env_opt = "-e ";
             break;
         case PE__CONTAINER_AGENT_RKT:
             hostname_opt = "--hostname=";
             env_opt = "--environment=";
             break;
         default:    // PE__CONTAINER_AGENT_UNKNOWN
             return pcmk_rc_unpack_error;
     }
     agent_str = container_agent_str(data->agent_type);
 
     buffer = g_string_sized_new(4096);
 
     id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str,
                            replica->offset);
     crm_xml_sanitize_id(id);
     xml_container = create_resource(id, "heartbeat", agent_str);
     free(id);
 
     xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
     crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset);
 
     crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
     crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
     crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
     crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
     if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) {
         g_string_append(buffer, " --restart=no");
     }
 
     /* Set a container hostname only if we have an IP to map it to. The user
      * can set -h or --uts=host themselves if they want a nicer name for logs,
      * but this satisfies applications that need their hostname to match the
      * IP they bind to.
      */
     if (data->ip_range_start != NULL) {
         g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix,
                                replica->offset);
     }
     pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL);
 
     if (data->container_network != NULL) {
         pcmk__g_strcat(buffer, " --net=", data->container_network, NULL);
     }
 
     if (data->control_port != NULL) {
         pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
                       data->control_port, NULL);
     } else {
         g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
                                DEFAULT_REMOTE_PORT);
     }
 
     for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
         pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data;
         char *source = NULL;
 
         if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
             source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix,
                                        replica->offset);
             pcmk__add_separated_word(&dbuffer, 1024, source, ",");
         }
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 pcmk__g_strcat(buffer,
                                " -v ", pcmk__s(source, mount->source),
                                ":", mount->target, NULL);
 
                 if (mount->options != NULL) {
                     pcmk__g_strcat(buffer, ":", mount->options, NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 g_string_append_printf(buffer,
                                        " --volume vol%d,kind=host,"
                                        "source=%s%s%s "
                                        "--mount volume=vol%d,target=%s",
                                        volid, pcmk__s(source, mount->source),
                                        (mount->options != NULL)? "," : "",
                                        pcmk__s(mount->options, ""),
                                        volid, mount->target);
                 volid++;
                 break;
             default:
                 break;
         }
         free(source);
     }
 
     for (GList *iter = data->ports; iter != NULL; iter = iter->next) {
         pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data;
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " -p ", replica->ipaddr, ":", port->source,
                                    ":", port->target, NULL);
 
                 } else if (!pcmk__str_eq(data->container_network, "host",
                                          pcmk__str_none)) {
                     // No need to do port mapping if net == host
                     pcmk__g_strcat(buffer,
                                    " -p ", port->source, ":", port->target,
                                    NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target,
                                    ":", replica->ipaddr, ":", port->source,
                                    NULL);
                 } else {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target, ":", port->source,
                                    NULL);
                 }
                 break;
             default:
                 break;
         }
     }
 
     /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because
      * it would cause restarts during rolling upgrades.
      *
      * In a previous version of the container resource creation logic, if
      * data->launcher_options is not NULL, we append
      * (" %s", data->launcher_options) even if data->launcher_options is an
      * empty string. Likewise for data->container_host_options. Using
      *
      *     pcmk__add_word(buffer, 0, data->launcher_options)
      *
      * removes that extra trailing space, causing a resource definition change.
      */
     if (data->launcher_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->launcher_options, NULL);
     }
 
     if (data->container_host_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->container_host_options, NULL);
     }
 
     crm_create_nvpair_xml(xml_obj, NULL, "run_opts",
                           (const char *) buffer->str);
     g_string_free(buffer, TRUE);
 
     crm_create_nvpair_xml(xml_obj, NULL, "mount_points",
                           (dbuffer != NULL)? (const char *) dbuffer->str : "");
     if (dbuffer != NULL) {
         g_string_free(dbuffer, TRUE);
     }
 
     if (replica->child != NULL) {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   SBIN_DIR "/pacemaker-remoted");
         }
 
         /* TODO: Allow users to specify their own?
          *
          * We just want to know if the container is alive; we'll monitor the
          * child independently.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
     } else if ((replica->child != NULL) && data->untrusted) {
         crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                               CRM_DAEMON_DIR "/pacemaker-execd");
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                               CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
     } else {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         }
 
         /* TODO: Allow users to specify their own?
          *
          * We don't know what's in the container, so we just want to know if it
          * is alive.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
     }
 
     xml_obj = create_xml_node(xml_container, "operations");
     crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
     // TODO: Other ops? Timeouts and intervals from underlying resource?
     if (pe__unpack_resource(xml_container, &replica->container, parent,
                             parent->cluster) != pcmk_rc_ok) {
         return pcmk_rc_unpack_error;
     }
     parent->children = g_list_append(parent->children, replica->container);
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pe_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
     }
 }
 
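 /*!
  * \internal
  * \brief Create the implicit remote connection resource for a bundle replica
  *
  * If the bundle contains a child resource and has a usable network
  * configuration, create a Pacemaker Remote connection resource for the
  * replica's guest node, ensure a node entry exists for it, and restrict the
  * replica's child to that node.
  *
  * \param[in,out] parent   Bundle resource to add the new child to
  * \param[in,out] data     Bundle variant data
  * \param[in,out] replica  Replica to create the remote resource for
  *
  * \return pcmk_rc_ok on success, pcmk_rc_unpack_error otherwise
  */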
 static int
 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         pe_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(parent->cluster->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             // @TODO return error instead of asserting?
             CRM_ASSERT(pe_find_resource(parent->cluster->resources,
                                         id) == NULL);
         }
 
         /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
          * connection does not have its own IP is a magic string that we use to
          * support nested remotes (i.e. a bundle running on a remote node).
          */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
           * knows that the bundle node is fenced by recovering the container,
           * and that the remote connection should be ordered relative to the
           * container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(parent->cluster->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   parent->cluster);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when pe__unpack_resource() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         g_list_foreach(parent->cluster->resources, (GFunc) disallow_node,
                        (gpointer) uname);
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = pcmk__strkey_table(NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pe_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (pe__unpack_resource(xml_remote, &replica->remote, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return pcmk_rc_ok;
 }
 
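 /*!
  * \internal
  * \brief Create all implicit resources needed by a bundle replica
  *
  * Create the replica's container, IP, and remote connection resources as
  * needed, and add them as children of the bundle.
  *
  * \param[in,out] parent   Bundle resource to add the new children to
  * \param[in,out] data     Bundle variant data
  * \param[in,out] replica  Replica to create resources for
  *
  * \return pcmk_rc_ok on success, pcmk_rc_unpack_error otherwise
  */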
 static int
 create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                          pe__bundle_replica_t *replica)
 {
     int rc = pcmk_rc_ok;
 
     rc = create_container_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_ip_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_remote_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if ((replica->child != NULL) && (replica->ipaddr != NULL)) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote != NULL) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
     }
     return rc;
 }
 
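 /*!
  * \internal
  * \brief Add a mount definition to a bundle's variant data
  *
  * \param[in,out] bundle_data  Bundle variant data to add the mount to
  * \param[in]     source       Source directory on the host
  * \param[in]     target       Target directory inside the container
  * \param[in]     options      Mount options (may be NULL)
  * \param[in]     flags        Group of pe__bundle_mount_* flags
  */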
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     CRM_ASSERT(mount != NULL);
     mount->source = strdup(source);
     mount->target = strdup(target);
     pcmk__str_update(&mount->options, options);
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
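 /*!
  * \internal
  * \brief Get the bundle replica that a remote connection resource belongs to
  *
  * \param[in] remote  Remote connection resource to look up
  *
  * \return Replica whose remote connection is \p remote, or NULL if none
  */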
 static pe__bundle_replica_t *
 replica_for_remote(pe_resource_t *remote)
 {
     pe_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
 bool
 pe__bundle_needs_remote_name(pe_resource_t *rsc)
 {
     const char *value;
     GHashTable *params = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
     // Use NULL node since pcmk__bundle_expand() uses that to set value
     params = pe_rsc_params(rsc, NULL, rsc->cluster);
     value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     return pcmk__str_eq(value, "#uname", pcmk__str_casei)
            && xml_contains_remote_node(rsc->xml);
 }
 
 const char *
 pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
                            xmlNode *xml, const char *field)
 {
     // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
 
     pe_node_t *node = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pe__bundle_needs_remote_name(rsc)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, pe__node_name(node));
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
 #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do {     \
         flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,           \
                                    "Bundle mount", ID(mount_xml), flags,    \
                                    (flags_to_set), #flags_to_set);          \
     } while (0)
 
 gboolean
 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
 
     // Use 0 for default, minimum, and invalid promoted-max
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     pcmk__scan_min_int(value, &bundle_data->promoted_max, 0);
 
     // Default replicas to promoted-max if it was specified and 1 otherwise
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && (bundle_data->promoted_max > 0)) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         pcmk__scan_min_int(value, &bundle_data->nreplicas, 1);
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if the container is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
     if (bundle_data->nreplicas_per_host == 1) {
         pe__clear_resource_flags(rsc, pe_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
         for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
              xml_child = pcmk__xe_next(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
          xml_child = pcmk__xe_next(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             pe__set_bundle_mount_flags(xml_child, flags,
                                        pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = pcmk__itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = pcmk__itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                               pcmk__btoa(bundle_data->nreplicas_per_host > 1));
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = pcmk__itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GList *childIter = NULL;
         pe__bundle_port_t *port = NULL;
         GString *buffer = NULL;
 
         if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
                                 data_set) != pcmk_rc_ok) {
             return FALSE;
         }
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection will do extra checking to make sure the key
          *   file exists and is readable, that we can't do here on the DC
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = g_string_sized_new(1024);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
                 pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
             }
 
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
             bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                 XML_RSC_ATTR_TARGET);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
 
         if (bundle_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
                                  strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         GString *buffer = g_string_sized_new(1024);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->offset = lpc;
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
 
         /* Utilization needs special handling for bundles. It makes no sense for
          * the inner primitive to have utilization, because it is tied
          * one-to-one to the guest node created by the container resource -- and
          * there's no way to set capacities for that guest node anyway.
          *
          * What the user really wants is to configure utilization for the
          * container. However, the schema only allows utilization for
          * primitives, and the container resource is implicit anyway, so the
          * user can *only* configure utilization for the inner primitive. If
          * they do, move the primitive's utilization values to the container.
          *
          * @TODO This means that bundles without an inner primitive can't have
          * utilization. An alternative might be to allow utilization values in
          * the top-level bundle XML in the schema, and copy those to each
          * container.
          */
         if (replica->child != NULL) {
             GHashTable *empty = replica->container->utilization;
 
             replica->container->utilization = replica->child->utilization;
             replica->child->utilization = empty;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
 
 static int
 replica_resource_active(pe_resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 pe__bundle_active(pe_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GList *iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Bundle replica if found, NULL otherwise
  */
 pe_resource_t *
 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica " XML_ATTR_ID "=\"%d\">\n",
                      pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
-    CRM_ASSERT(rsc != NULL);
+    const char *desc = NULL;
 
+    CRM_ASSERT(rsc != NULL);
+
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         char *id = NULL;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
-            rc = pe__name_and_nvpairs_xml(out, true, "bundle", 7,
+            desc = pe__resource_description(rsc, show_opts);
+
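+            // Resource description (if any) is passed as "description" below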
+            rc = pe__name_and_nvpairs_xml(out, true, "bundle", 8,
                      "id", rsc->id,
                      "type", container_agent_str(bundle_data->agent_type),
                      "image", bundle_data->image,
                      "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                      "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
                      "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
-                     "failed", pe__rsc_bool_str(rsc, pe_rsc_failed));
+                     "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+                     "description", desc);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         id = pcmk__itoa(replica->offset);
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (print_ip) {
             out->message(out, crm_map_element_name(replica->ip->xml), show_opts,
                          replica->ip, only_node, only_rsc);
         }
 
         if (print_child) {
             out->message(out, crm_map_element_name(replica->child->xml), show_opts,
                          replica->child, only_node, only_rsc);
         }
 
         if (print_ctnr) {
             out->message(out, crm_map_element_name(replica->container->xml), show_opts,
                          replica->container, only_node, only_rsc);
         }
 
         if (print_remote) {
             out->message(out, crm_map_element_name(replica->remote->xml), show_opts,
                          replica->remote, only_node, only_rsc);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, uint32_t show_opts)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, show_opts);
 }
 
 /*!
  * \internal
  * \brief Get a string describing a resource's unmanaged state or lack thereof
  *
  * \param[in] rsc  Resource to describe
  *
  * \return A string indicating that a resource is in maintenance mode or
  *         otherwise unmanaged, or an empty string otherwise
  */
 static const char *
 get_unmanaged_str(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
         return " (maintenance)";
     }
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return " (unmanaged)";
     }
     return "";
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
+    const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
+    desc = pe__resource_description(rsc, show_opts);
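+    // desc (when not NULL) is shown in parentheses in the header lines below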
+
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The text output messages used below require pe_print_implicit to
              * be set to do anything.
              */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
-            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+                                     desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset);
             }
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->end_list(out);
             }
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
-            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+                                     desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, uint32_t show_opts)
 {
     const pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, show_opts);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
+    const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     get_bundle_variant_data(bundle_data, rsc);
 
     CRM_ASSERT(rsc != NULL);
+
+    desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The text output messages used below require pe_print_implicit to
              * be set to do anything.
              */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
-            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+                                     desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
-            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+            PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+                                     desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (pcmk_is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 static void
 free_bundle_replica(pe__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     g_free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if(bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
 
 enum rsc_role_e
 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pe_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
 
 void
 pe__count_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pe__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
 
 gboolean
 pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                        gboolean check_parent)
 {
     gboolean passes = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         get_bundle_variant_data(bundle_data, rsc);
 
         for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index e9bc23d92a..7f6eca0b1b 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1342 +1,1356 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/xml_internal.h>
 
 #define VARIANT_CLONE 1
 #include "./variant.h"
 
 #ifdef PCMK__COMPAT_2_0
 #define PROMOTED_INSTANCES   RSC_ROLE_PROMOTED_LEGACY_S "s"
 #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
 #else
 #define PROMOTED_INSTANCES   RSC_ROLE_PROMOTED_S
 #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
 #endif
 
 /*!
  * \internal
  * \brief Return the maximum number of clone instances allowed to be run
  *
  * \param[in] clone  Clone or clone instance to check
  *
  * \return Maximum instances for \p clone
  */
 int
 pe__clone_max(const pe_resource_t *clone)
 {
     const clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
     return clone_data->clone_max;
 }
 
 /*!
  * \internal
  * \brief Return the maximum number of clone instances allowed to be promoted
  *
  * \param[in] clone  Promotable clone or clone instance to check
  *
  * \return Maximum promoted instances for \p clone
  */
 int
 pe__clone_promoted_max(const pe_resource_t *clone)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
     return clone_data->promoted_max;
 }
 
 /*!
  * \internal
  * \brief Return the maximum number of instances per node allowed to be promoted
  *
  * \param[in] clone  Promotable clone or clone instance to check
  *
  * \return Maximum promoted instances per node for \p clone
  */
 int
 pe__clone_promoted_node_max(const pe_resource_t *clone)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
     return clone_data->promoted_node_max;
 }
 
 static GList *
 sorted_hash_table_values(GHashTable *table)
 {
     GList *retval = NULL;
     GHashTableIter iter;
     gpointer key, value;
 
     g_hash_table_iter_init(&iter, table);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         if (!g_list_find_custom(retval, value, (GCompareFunc) strcmp)) {
             retval = g_list_prepend(retval, (char *) value);
         }
     }
 
     retval = g_list_sort(retval, (GCompareFunc) strcmp);
     return retval;
 }
 
 static GList *
 nodes_with_status(GHashTable *table, const char *status)
 {
     GList *retval = NULL;
     GHashTableIter iter;
     gpointer key, value;
 
     g_hash_table_iter_init(&iter, table);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         if (!strcmp((char *) value, status)) {
             retval = g_list_prepend(retval, key);
         }
     }
 
     retval = g_list_sort(retval, (GCompareFunc) pcmk__numeric_strcasecmp);
     return retval;
 }
 
 static GString *
 node_list_to_str(const GList *list)
 {
     GString *retval = NULL;
 
     for (const GList *iter = list; iter != NULL; iter = iter->next) {
         pcmk__add_word(&retval, 1024, (const char *) iter->data);
     }
 
     return retval;
 }
 
 static void
 clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
-             clone_variant_data_t *clone_data)
+             clone_variant_data_t *clone_data, const char *desc)
 {
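+    // When not NULL, desc is appended to the header line in parentheses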
     GString *attrs = NULL;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__add_separated_word(&attrs, 64, "promotable", ", ");
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         pcmk__add_separated_word(&attrs, 64, "unique", ", ");
     }
 
     if (pe__resource_is_disabled(rsc)) {
         pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
         pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
     }
 
     if (attrs != NULL) {
-        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)",
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)%s%s%s",
                                  rsc->id, ID(clone_data->xml_obj_child),
-                                 (const char *) attrs->str);
+                                 (const char *) attrs->str, desc ? " (" : "",
+                                 desc ? desc : "", desc ? ")" : "");
         g_string_free(attrs, TRUE);
     } else {
-        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]",
-                                 rsc->id, ID(clone_data->xml_obj_child))
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]%s%s%s",
+                                 rsc->id, ID(clone_data->xml_obj_child),
+                                 desc ? " (" : "", desc ? desc : "",
+                                 desc ? ")" : "");
     }
 }
 
 void
 pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
                pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
 
         pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
                 "such as %s can be used only as anonymous clones",
                 rsc->id, standard, rid);
 
         clone_data->clone_node_max = 1;
         clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                        g_list_length(data_set->nodes));
     }
 }
 
 pe_resource_t *
 find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
 {
     char *child_id = NULL;
     pe_resource_t *child = NULL;
     const char *child_base = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     child_base = ID(clone_data->xml_obj_child);
     child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
     child = pe_find_resource(rsc->children, child_id);
 
     free(child_id);
     return child;
 }
 
 pe_resource_t *
 pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     gboolean as_orphan = FALSE;
     char *inc_num = NULL;
     char *inc_max = NULL;
     pe_resource_t *child_rsc = NULL;
     xmlNode *child_copy = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
 
     if (clone_data->total_clones >= clone_data->clone_max) {
         // If we've already used all available instances, this is an orphan
         as_orphan = TRUE;
     }
 
     // Allocate instance numbers in numerical order (starting at 0)
     inc_num = pcmk__itoa(clone_data->total_clones);
     inc_max = pcmk__itoa(clone_data->clone_max);
 
     child_copy = copy_xml(clone_data->xml_obj_child);
 
     crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
 
     if (pe__unpack_resource(child_copy, &child_rsc, rsc,
                             data_set) != pcmk_rc_ok) {
         goto bail;
     }
 /*  child_rsc->globally_unique = rsc->globally_unique; */
 
     CRM_ASSERT(child_rsc);
     clone_data->total_clones += 1;
     pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
     rsc->children = g_list_append(rsc->children, child_rsc);
     if (as_orphan) {
         pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
     }
 
     add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
     pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
 
   bail:
     free(inc_num);
     free(inc_max);
 
     return child_rsc;
 }
 
 gboolean
 clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     int lpc = 0;
     xmlNode *a_child = NULL;
     xmlNode *xml_obj = rsc->xml;
     clone_variant_data_t *clone_data = NULL;
 
     const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
     const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     clone_data = calloc(1, sizeof(clone_variant_data_t));
     rsc->variant_opaque = clone_data;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         const char *promoted_max = NULL;
         const char *promoted_node_max = NULL;
 
         promoted_max = g_hash_table_lookup(rsc->meta,
                                            XML_RSC_ATTR_PROMOTED_MAX);
         if (promoted_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_max = g_hash_table_lookup(rsc->meta,
                                                PCMK_XA_PROMOTED_MAX_LEGACY);
         }
 
         promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                 XML_RSC_ATTR_PROMOTED_NODEMAX);
         if (promoted_node_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_node_max =
                 g_hash_table_lookup(rsc->meta,
                                     PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
         }
 
         // Use 1 as default but 0 for minimum and invalid
         if (promoted_max == NULL) {
             clone_data->promoted_max = 1;
         } else {
             pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
         }
 
         // Use 1 as default but 0 for minimum and invalid
         if (promoted_node_max == NULL) {
             clone_data->promoted_node_max = 1;
         } else {
             pcmk__scan_min_int(promoted_node_max,
                                &(clone_data->promoted_node_max), 0);
         }
     }
 
     // Implied by calloc()
     /* clone_data->xml_obj_child = NULL; */
 
     // Use 1 as default but 0 for minimum and invalid
     if (max_clones_node == NULL) {
         clone_data->clone_node_max = 1;
     } else {
         pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
     }
 
     /* Use number of nodes (but always at least 1, which is handy for crm_verify
      * for a CIB without nodes) as default, but 0 for minimum and invalid
      */
     if (max_clones == NULL) {
         clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
     } else {
         pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
     }
 
     if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) {
         clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
                                                "Clone", rsc->id,
                                                clone_data->flags,
                                                pe__clone_ordered,
                                                "pe__clone_ordered");
     }
 
     if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
         pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
                          "because anonymous clones support only one instance "
                          "per node", rsc->id);
         clone_data->clone_node_max = 1;
     }
 
     pe_rsc_trace(rsc, "Options for %s", rsc->id);
     pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
     pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
     pe_rsc_trace(rsc, "\tClone is unique: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_unique));
     pe_rsc_trace(rsc, "\tClone is promotable: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
 
     // Clones may contain a single group or primitive
     for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
          a_child = pcmk__xe_next(a_child)) {
 
         if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
             clone_data->xml_obj_child = a_child;
             break;
         }
     }
 
     if (clone_data->xml_obj_child == NULL) {
         pcmk__config_err("%s has nothing to clone", rsc->id);
         return FALSE;
     }
 
     /*
      * Make clones ever so slightly sticky by default
      *
      * This helps ensure clone instances are not shuffled around the cluster
      * for no benefit in situations when pre-allocation is not appropriate
      */
     if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
         add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
     }
 
     /* This ensures that the globally-unique value always exists for children to
      * inherit when being unpacked, as well as in resource agents' environment.
      */
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                    pe__rsc_bool_str(rsc, pe_rsc_unique));
 
     if (clone_data->clone_max <= 0) {
         /* Create one child instance so that unpack_find_resource() will hook
          * any orphans up to the parent correctly.
          */
         if (pe__create_clone_child(rsc, data_set) == NULL) {
             return FALSE;
         }
 
     } else {
         // Create a child instance for each available instance number
         for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
             if (pe__create_clone_child(rsc, data_set) == NULL) {
                 return FALSE;
             }
         }
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
     return TRUE;
 }
 
 gboolean
 clone_active(pe_resource_t * rsc, gboolean all)
 {
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean child_active = child_rsc->fns->active(child_rsc, all);
 
         if (all == FALSE && child_active) {
             return TRUE;
         } else if (all && child_active == FALSE) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     } else {
         return FALSE;
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 short_print(const char *list, const char *prefix, const char *type,
             const char *suffix, long options, void *print_data)
 {
     if(suffix == NULL) {
         suffix = "";
     }
 
     if (!pcmk__str_empty(list)) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         status_print("%s%s: [ %s ]%s", prefix, type, list, suffix);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
 
         } else if (options & pe_print_suppres_nl) {
             /* nothing */
         } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
             status_print("\n");
         }
 
     }
 }
 
 static const char *
 configured_role_str(pe_resource_t * rsc)
 {
     const char *target_role = g_hash_table_lookup(rsc->meta,
                                                   XML_RSC_ATTR_TARGET_ROLE);
 
     if ((target_role == NULL) && rsc->children && rsc->children->data) {
         target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
                                           XML_RSC_ATTR_TARGET_ROLE);
     }
     return target_role;
 }
 
 static enum rsc_role_e
 configured_role(pe_resource_t * rsc)
 {
     const char *target_role = configured_role_str(rsc);
 
     if (target_role) {
         return text2role(target_role);
     }
     return RSC_ROLE_UNKNOWN;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                 void *print_data)
 {
     char *child_text = crm_strdup_printf("%s    ", pre_text);
     const char *target_role = configured_role_str(rsc);
     GList *gIter = rsc->children;
 
     status_print("%s<clone ", pre_text);
     status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
     status_print("multi_state=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</clone>\n", pre_text);
     free(child_text);
 }
 
 bool
 is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
 {
     GList *gIter;
     bool all = !any;
 
     if (pcmk_is_set(rsc->flags, flag)) {
         if(any) {
             return TRUE;
         }
     } else if(all) {
         return FALSE;
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         if(is_set_recursive(gIter->data, flag, any)) {
             if(any) {
                 return TRUE;
             }
 
         } else if(all) {
             return FALSE;
         }
     }
 
     if(all) {
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 clone_print(pe_resource_t *rsc, const char *pre_text, long options,
             void *print_data)
 {
     GString *list_text = NULL;
     char *child_text = NULL;
     GString *stopped_list = NULL;
 
     GList *promoted_list = NULL;
     GList *started_list = NULL;
     GList *gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         clone_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_clone_variant_data(clone_data, rsc);
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sClone Set: %s [%s]%s%s%s",
                  pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
                  pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(options, pe_print_clone_active)) {
 
                 pcmk__add_word(&stopped_list, 1024, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_UNPROMOTED) {
                     promoted_list = g_list_append(promoted_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     /* Promoted */
     promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
     for (gIter = promoted_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, 1024, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         short_print((const char *) list_text->str, child_text,
                     PROMOTED_INSTANCES, NULL, options, print_data);
         g_string_truncate(list_text, 0);
     }
     g_list_free(promoted_list);
 
     /* Started/Unpromoted */
     started_list = g_list_sort(started_list, pe__cmp_node_name);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, 1024, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if (role == RSC_ROLE_UNPROMOTED) {
                 short_print((const char *) list_text->str, child_text,
                             UNPROMOTED_INSTANCES " (target-role)", NULL,
                             options, print_data);
             } else {
                 short_print((const char *) list_text->str, child_text,
                             UNPROMOTED_INSTANCES, NULL, options, print_data);
             }
 
         } else {
             short_print((const char *) list_text->str, child_text, "Started",
                         NULL, options, print_data);
         }
     }
 
     g_list_free(started_list);
 
     if (!pcmk_is_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GList *nIter;
             GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             if (stopped_list != NULL) {
                 g_string_truncate(stopped_list, 0);
             }
 
             if (list == NULL) {
                 /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                  * If we've not probed for them yet, the Stopped list will be empty
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, pe__cmp_node_name);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                     pcmk__add_word(&stopped_list, 1024, node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
             short_print((const char *) stopped_list->str, child_text, state,
                         NULL, options, print_data);
         }
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 
     if (list_text != NULL) {
         g_string_free(list_text, TRUE);
     }
 
     if (stopped_list != NULL) {
         g_string_free(stopped_list, TRUE);
     }
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
+
+    const char *desc = NULL;
     GList *gIter = rsc->children;
     GList *all = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
+
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
     all = g_list_prepend(all, (gpointer) "*");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
-            rc = pe__name_and_nvpairs_xml(out, true, "clone", 9,
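+            // Fetch the resource description (if any) to include as an XML attribute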
+            desc = pe__resource_description(rsc, show_opts);
+
+            rc = pe__name_and_nvpairs_xml(out, true, "clone", 10,
                     "id", rsc->id,
                     "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
                     "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                     "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
                     "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                     "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
                     "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
                     "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
-                    "target_role", configured_role_str(rsc));
+                    "target_role", configured_role_str(rsc),
+                    "description", desc);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                      child_rsc, only_node, all);
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     g_list_free(all);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_default(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     GHashTable *stopped = NULL;
 
     GString *list_text = NULL;
 
     GList *promoted_list = NULL;
     GList *started_list = NULL;
     GList *gIter = rsc->children;
 
+    const char *desc = NULL;
+
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
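+    // Fetch the resource description (if any) for inclusion in the clone header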
+    desc = pe__resource_description(rsc, show_opts);
+
     get_clone_variant_data(clone_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(show_opts, pcmk_show_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(show_opts, pcmk_show_clone_detail)
                 && pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                 if (stopped == NULL) {
                     stopped = pcmk__strkey_table(free, free);
                 }
                 g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped"));
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_UNPROMOTED) {
                     promoted_list = g_list_append(promoted_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             GList *all = NULL;
 
-            clone_header(out, &rc, rsc, clone_data);
+            clone_header(out, &rc, rsc, clone_data, desc);
 
             /* Print every resource that's a child of this clone. */
             all = g_list_prepend(all, (gpointer) "*");
             out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                          child_rsc, only_node, all);
             g_list_free(all);
         }
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
         return pcmk_rc_ok;
     }
 
     /* Promoted */
     promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
     for (gIter = promoted_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(host->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         pcmk__add_word(&list_text, 1024, host->details->uname);
         active_instances++;
     }
     g_list_free(promoted_list);
 
     if ((list_text != NULL) && (list_text->len > 0)) {
-        clone_header(out, &rc, rsc, clone_data);
+        clone_header(out, &rc, rsc, clone_data, desc);
 
         out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]",
                        (const char *) list_text->str);
         g_string_truncate(list_text, 0);
     }
 
     /* Started/Unpromoted */
     started_list = g_list_sort(started_list, pe__cmp_node_name);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(host->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         pcmk__add_word(&list_text, 1024, host->details->uname);
         active_instances++;
     }
     g_list_free(started_list);
 
     if ((list_text != NULL) && (list_text->len > 0)) {
-        clone_header(out, &rc, rsc, clone_data);
+        clone_header(out, &rc, rsc, clone_data, desc);
 
         if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if (role == RSC_ROLE_UNPROMOTED) {
                 out->list_item(out, NULL,
                                UNPROMOTED_INSTANCES " (target-role): [ %s ]",
                                (const char *) list_text->str);
             } else {
                 out->list_item(out, NULL, UNPROMOTED_INSTANCES ": [ %s ]",
                                (const char *) list_text->str);
             }
 
         } else {
             out->list_item(out, NULL, "Started: [ %s ]",
                            (const char *) list_text->str);
         }
     }
 
     if (list_text != NULL) {
         g_string_free(list_text, TRUE);
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GList *nIter;
             GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped table for non-unique clones */
             if (stopped != NULL) {
                 g_hash_table_destroy(stopped);
                 stopped = NULL;
             }
 
             if (list == NULL) {
                 /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                  * If we've not probed for them yet, the Stopped list will be empty
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, pe__cmp_node_name);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(node->details->uname, only_node,
                                       pcmk__str_star_matches|pcmk__str_casei)) {
                     xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname);
                     const char *state = "Stopped";
 
                     if (configured_role(rsc) == RSC_ROLE_STOPPED) {
                         state = "Stopped (disabled)";
                     }
 
                     if (stopped == NULL) {
                         stopped = pcmk__strkey_table(free, free);
                     }
                     if (probe_op != NULL) {
                         int rc;
 
                         pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0);
                         g_hash_table_insert(stopped, strdup(node->details->uname),
                                             crm_strdup_printf("Stopped (%s)", services_ocf_exitcode_str(rc)));
                     } else {
                         g_hash_table_insert(stopped, strdup(node->details->uname),
                                             strdup(state));
                     }
                 }
             }
             g_list_free(list);
         }
 
         if (stopped != NULL) {
             GList *list = sorted_hash_table_values(stopped);
 
-            clone_header(out, &rc, rsc, clone_data);
+            clone_header(out, &rc, rsc, clone_data, desc);
 
             for (GList *status_iter = list; status_iter != NULL; status_iter = status_iter->next) {
                 const char *status = status_iter->data;
                 GList *nodes = nodes_with_status(stopped, status);
                 GString *nodes_str = node_list_to_str(nodes);
 
                 if (nodes_str != NULL) {
                     if (nodes_str->len > 0) {
                         out->list_item(out, NULL, "%s: [ %s ]", status,
                                        (const char *) nodes_str->str);
                     }
                     g_string_free(nodes_str, TRUE);
                 }
 
                 g_list_free(nodes);
             }
 
             g_list_free(list);
             g_hash_table_destroy(stopped);
 
         /* If there are no instances of this clone (perhaps because there are no
          * nodes configured), simply output the clone header by itself.  This can
          * come up in PCS testing.
          */
         } else if (active_instances == 0) {
-            clone_header(out, &rc, rsc, clone_data);
+            clone_header(out, &rc, rsc, clone_data, desc);
             PCMK__OUTPUT_LIST_FOOTER(out, rc);
             return rc;
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 void
 clone_free(pe_resource_t * rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         free_xml(child_rsc->xml);
         child_rsc->xml = NULL;
         /* There could be a saved unexpanded xml */
         free_xml(child_rsc->orig_xml);
         child_rsc->orig_xml = NULL;
         child_rsc->fns->free(child_rsc);
     }
 
     g_list_free(rsc->children);
 
     if (clone_data) {
         CRM_ASSERT(clone_data->demote_notify == NULL);
         CRM_ASSERT(clone_data->stop_notify == NULL);
         CRM_ASSERT(clone_data->start_notify == NULL);
         CRM_ASSERT(clone_data->promote_notify == NULL);
     }
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 clone_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
 
         if (a_role > clone_role) {
             clone_role = a_role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
     return clone_role;
 }
 
 /*!
  * \internal
  * \brief Check whether a clone has an instance for every node
  *
  * \param[in] rsc       Clone to check
  * \param[in] data_set  Cluster state
  */
 bool
 pe__is_universal_clone(const pe_resource_t *rsc,
                        const pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
         if (clone_data->clone_max == g_list_length(data_set->nodes)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 gboolean
 pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                       gboolean check_parent)
 {
     gboolean passes = FALSE;
     clone_variant_data_t *clone_data = NULL;
 
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         get_clone_variant_data(clone_data, rsc);
         passes = pcmk__str_in_list(ID(clone_data->xml_obj_child), only_rsc, pcmk__str_star_matches);
 
         if (!passes) {
             for (const GList *iter = rsc->children;
                  iter != NULL; iter = iter->next) {
 
                 const pe_resource_t *child_rsc = NULL;
 
                 child_rsc = (const pe_resource_t *) iter->data;
                 if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                     passes = TRUE;
                     break;
                 }
             }
         }
     }
     return !passes;
 }
 
 const char *
 pe__clone_child_id(const pe_resource_t *rsc)
 {
     clone_variant_data_t *clone_data = NULL;
     get_clone_variant_data(clone_data, rsc);
     return ID(clone_data->xml_obj_child);
 }
 
 /*!
  * \internal
  * \brief Check whether a clone is ordered
  *
  * \param[in] clone  Clone resource to check
  *
  * \return true if clone is ordered, otherwise false
  */
 bool
 pe__clone_is_ordered(const pe_resource_t *clone)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, clone);
     return pcmk_is_set(clone_data->flags, pe__clone_ordered);
 }
 
 /*!
  * \internal
  * \brief Set a clone flag
  *
  * \param[in,out] clone  Clone resource to set flag for
  * \param[in]     flag   Clone flag to set
  *
  * \return Standard Pacemaker return code (either pcmk_rc_ok if flag was not
  *         already set or pcmk_rc_already if it was)
  */
 int
 pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, clone);
     if (pcmk_is_set(clone_data->flags, flag)) {
         return pcmk_rc_already;
     }
     clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
                                            "Clone", clone->id,
                                            clone_data->flags, flag, "flag");
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Create pseudo-actions needed for promotable clones
  *
  * \param[in,out] clone          Promotable clone to create actions for
  * \param[in]     any_promoting  Whether any instances will be promoted
  * \param[in]     any_demoting   Whether any instance will be demoted
  */
 void
 pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
                                  bool any_demoting)
 {
     pe_action_t *action = NULL;
     pe_action_t *action_complete = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, clone);
 
     // Create a "promote" action for the clone itself
     action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting,
                                        true);
 
     // Create a "promoted" action for when all promotions are done
     action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED,
                                                 !any_promoting, true);
     action_complete->priority = INFINITY;
 
     // Create notification pseudo-actions for promotion
     if (clone_data->promote_notify == NULL) {
         clone_data->promote_notify = pe__clone_notif_pseudo_ops(clone,
                                                                 RSC_PROMOTE,
                                                                 action,
                                                                 action_complete);
     }
 
     // Create a "demote" action for the clone itself
     action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true);
 
     // Create a "demoted" action for when all demotions are done
     action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED,
                                                 !any_demoting, true);
     action_complete->priority = INFINITY;
 
     // Create notification pseudo-actions for demotion
     if (clone_data->demote_notify == NULL) {
         clone_data->demote_notify = pe__clone_notif_pseudo_ops(clone,
                                                                RSC_DEMOTE,
                                                                action,
                                                                action_complete);
 
         if (clone_data->promote_notify != NULL) {
             order_actions(clone_data->stop_notify->post_done,
                           clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->start_notify->post_done,
                           clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done,
                           clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done,
                           clone_data->start_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done,
                           clone_data->stop_notify->pre,
                           pe_order_optional);
         }
     }
 }
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index d7819c2c64..d54b01ab21 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,509 +1,521 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/strings_internal.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 typedef struct group_variant_data_s {
     pe_resource_t *last_child;  // Last group member
     uint32_t flags;             // Group of enum pe__group_flags
 } group_variant_data_t;
 
 /*!
  * \internal
  * \brief Get a group's last member
  *
  * \param[in] group  Group resource to check
  *
  * \return Last member of \p group if any, otherwise NULL
  */
 pe_resource_t *
 pe__last_group_member(const pe_resource_t *group)
 {
     if (group != NULL) {
         CRM_CHECK((group->variant == pe_group)
                   && (group->variant_opaque != NULL), return NULL);
         return ((group_variant_data_t *) group->variant_opaque)->last_child;
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Check whether a group flag is set
  *
  * \param[in] group  Group resource to check
  * \param[in] flags  Flag or flags to check
  *
  * \return true if all \p flags are set for \p group, otherwise false
  */
 bool
 pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
 {
     group_variant_data_t *group_data = NULL;
 
     CRM_CHECK((group != NULL) && (group->variant == pe_group)
               && (group->variant_opaque != NULL), return false);
     group_data = (group_variant_data_t *) group->variant_opaque;
     return pcmk_all_flags_set(group_data->flags, flags);
 }
 
 /*!
  * \internal
  * \brief Set a (deprecated) group flag
  *
  * \param[in,out] group   Group resource to check
  * \param[in]     option  Name of boolean configuration option
  * \param[in]     flag    Flag to set if \p option is true (which is default)
  * \param[in]     wo_bit  "Warn once" flag to use for deprecation warning
  */
 static void
 set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
                uint32_t wo_bit)
 {
     const char *value_s = NULL;
     int value = 0;
 
     value_s = g_hash_table_lookup(group->meta, option);
 
     // We don't actually need the null check but it speeds up the common case
     if ((value_s == NULL) || (crm_str_to_boolean(value_s, &value) < 0)
         || (value != 0)) {
 
         ((group_variant_data_t *) group->variant_opaque)->flags |= flag;
 
     } else {
         pe_warn_once(wo_bit,
                      "Support for the '%s' group meta-attribute is deprecated "
                      "and will be removed in a future release "
                      "(use a resource set instead)", option);
     }
 }
 
 static int
 inactive_resources(pe_resource_t *rsc)
 {
     int retval = 0;
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (!child_rsc->fns->active(child_rsc, TRUE)) {
             retval++;
         }
     }
 
     return retval;
 }
 
 static void
 group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
-             int n_inactive, bool show_inactive)
+             int n_inactive, bool show_inactive, const char *desc)
 {
     GString *attrs = NULL;
 
     if (n_inactive > 0 && !show_inactive) {
         attrs = g_string_sized_new(64);
         g_string_append_printf(attrs, "%d member%s inactive", n_inactive,
                                pcmk__plural_s(n_inactive));
     }
 
     if (pe__resource_is_disabled(rsc)) {
         pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
         pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
     }
 
     if (attrs != NULL) {
-        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)",
-                                 rsc->id, (const char *) attrs->str);
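+        /* Append the resource description, if any, in parentheses */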
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)%s%s%s",
+                                 rsc->id,
+                                 (const char *) attrs->str, desc ? " (" : "",
+                                 desc ? desc : "", desc ? ")" : "");
         g_string_free(attrs, TRUE);
     } else {
-        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s", rsc->id);
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s%s%s%s",
+                                 rsc->id,
+                                 desc ? " (" : "", desc ? desc : "",
+                                 desc ? ")" : "");
     }
 }
 
 static bool
 skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
                GList *only_rsc, uint32_t show_opts)
 {
     bool star_list = pcmk__list_of_1(only_rsc) &&
                      pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
     bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE);
     bool child_active = child->fns->active(child, FALSE);
     bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs);
 
     /* If the resource is in only_rsc by name (so, ignoring "*") then allow
      * it regardless of if it's active or not.
      */
     if (!star_list && !child_filtered) {
         return false;
 
     } else if (!child_filtered && (child_active || show_inactive)) {
         return false;
 
     } else if (parent_passes && (child_active || show_inactive)) {
         return false;
 
     }
 
     return true;
 }
 
 gboolean
 group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = rsc->xml;
     xmlNode *xml_native_rsc = NULL;
     group_variant_data_t *group_data = NULL;
     const char *clone_id = NULL;
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     group_data = calloc(1, sizeof(group_variant_data_t));
     group_data->last_child = NULL;
     rsc->variant_opaque = group_data;
 
     // @COMPAT These are deprecated since 2.1.5
     set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
                    pe_wo_group_order);
     set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
 
     clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
 
     for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
          xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_native_rsc->name,
                          XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
             pe_resource_t *new_rsc = NULL;
 
             crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
             if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
                                     data_set) != pcmk_rc_ok) {
                 continue;
             }
 
             rsc->children = g_list_append(rsc->children, new_rsc);
             group_data->last_child = new_rsc;
             pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
         }
     }
 
     if (rsc->children == NULL) {
         /* The schema does not allow empty groups, but if validation is
          * disabled, we allow them (members can be added later).
          *
          * @COMPAT At a major release bump, we should consider this a failure so
          *         that group methods can assume children is not NULL, and there
          *         are no strange effects from phantom groups due to their
          *         presence or meta-attributes.
          */
         pcmk__config_warn("Group %s will be ignored because it does not have "
                           "any members", rsc->id);
     }
     return TRUE;
 }
 
 gboolean
 group_active(pe_resource_t * rsc, gboolean all)
 {
     gboolean c_all = TRUE;
     gboolean c_any = FALSE;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (child_rsc->fns->active(child_rsc, all)) {
             c_any = TRUE;
         } else {
             c_all = FALSE;
         }
     }
 
     if (c_any == FALSE) {
         return FALSE;
     } else if (all && c_all == FALSE) {
         return FALSE;
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                 void *print_data)
 {
     GList *gIter = rsc->children;
     char *child_text = crm_strdup_printf("%s     ", pre_text);
 
     status_print("%s<group " XML_ATTR_ID "=\"%s\" ", pre_text, rsc->id);
     status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</group>\n", pre_text);
     free(child_text);
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 group_print(pe_resource_t *rsc, const char *pre_text, long options,
             void *print_data)
 {
     char *child_text = NULL;
     GList *gIter = rsc->children;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         group_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     if (options & pe_print_brief) {
         print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
 
     } else {
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
-
+
+    const char *desc = NULL;
     GList *gIter = rsc->children;
 
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
+    desc = pe__resource_description(rsc, show_opts);
+
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             char *count = pcmk__itoa(g_list_length(gIter));
             const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
             const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
             const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
 
-            rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
+            rc = pe__name_and_nvpairs_xml(out, true, "group", 6,
                                           XML_ATTR_ID, rsc->id,
                                           "number_resources", count,
                                           "maintenance", maint_s,
                                           "managed", managed_s,
-                                          "disabled", disabled_s);
+                                          "disabled", disabled_s,
+                                          "description", desc);
             free(count);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc,
-					 only_node, only_rsc);
+                     only_node, only_rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_default(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
+    const char *desc = NULL;
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
     gboolean active = rsc->fns->active(rsc, TRUE);
     gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
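+    // Look up the resource description once; group_header() appends it when present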
+    desc = pe__resource_description(rsc, show_opts);
+
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_brief)) {
         GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
 
         if (rscs != NULL) {
             group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
-                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
+                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
             pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs);
 
             rc = pcmk_rc_ok;
             g_list_free(rscs);
         }
 
     } else {
         for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
                 continue;
             }
 
             group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
-                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
+                         pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
             out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                          child_rsc, only_node, only_rsc);
         }
     }
 
 	PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     return rc;
 }
 
 void
 group_free(pe_resource_t * rsc)
 {
     CRM_CHECK(rsc != NULL, return);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         child_rsc->fns->free(child_rsc);
     }
 
     pe_rsc_trace(rsc, "Freeing child list");
     g_list_free(rsc->children);
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 group_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
 
         if (role > group_role) {
             group_role = role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
     return group_role;
 }
 
 gboolean
 pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                       gboolean check_parent)
 {
     gboolean passes = FALSE;
 
     if (check_parent
         && pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc,
                                                                      false)),
                              only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         for (const GList *iter = rsc->children;
              iter != NULL; iter = iter->next) {
 
             const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
 
             if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 572269cf2c..b3f903985e 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1405 +1,1414 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/common/output.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/complex.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <pe_status_private.h>
 
 #ifdef PCMK__COMPAT_2_0
 #define PROVIDER_SEP "::"
 #else
 #define PROVIDER_SEP ":"
 #endif
 
 /*!
  * \internal
  * \brief Check whether a resource is active on multiple nodes
  */
 static bool
 is_multiply_active(const pe_resource_t *rsc)
 {
     unsigned int count = 0;
 
     if (rsc->variant == pe_native) {
         pe__find_active_requires(rsc, &count);
     }
     return count > 1;
 }
 
 static void
 native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
 {
     int priority = 0;
 
     if ((rsc->priority == 0) || (failed == TRUE)) {
         return;
     }
 
     if (rsc->role == RSC_ROLE_PROMOTED) {
         // Promoted instance takes base priority + 1
         priority = rsc->priority + 1;
 
     } else {
         priority = rsc->priority;
     }
 
     node->details->priority += priority;
     pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
                  pe__node_name(node), node->details->priority,
                  (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
                  rsc->id, rsc->priority,
                  (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
 
     /* Priority of a resource running on a guest node is added to the cluster
      * node as well. */
     if (node->details->remote_rsc
         && node->details->remote_rsc->container) {
         GList *gIter = node->details->remote_rsc->container->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *a_node = gIter->data;
 
             a_node->details->priority += priority;
             pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
                          "from guest node %s",
                          pe__node_name(a_node), a_node->details->priority,
                          (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
                          rsc->id, rsc->priority,
                          (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
                          pe__node_name(node));
         }
     }
 }
 
 void
 native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
 {
     GList *gIter = rsc->running_on;
 
     CRM_CHECK(node != NULL, return);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         CRM_CHECK(a_node != NULL, return);
         if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
             return;
         }
     }
 
     pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
 
     rsc->running_on = g_list_append(rsc->running_on, node);
     if (rsc->variant == pe_native) {
         node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
 
         native_priority_to_node(rsc, node, failed);
     }
 
     if (rsc->variant == pe_native && node->details->maintenance) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
         pe__set_resource_flags(rsc, pe_rsc_maintenance);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_resource_t *p = rsc->parent;
 
         pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
         resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
 
         while(p && node->details->online) {
             /* add without the additional location constraint */
             p->running_on = g_list_append(p->running_on, node);
             p = p->parent;
         }
         return;
     }
 
     if (is_multiply_active(rsc)) {
         switch (rsc->recovery_type) {
             case recovery_stop_only:
                 {
                     GHashTableIter gIter;
                     pe_node_t *local_node = NULL;
 
                     /* make sure it doesn't come up again */
                     if (rsc->allowed_nodes != NULL) {
                         g_hash_table_destroy(rsc->allowed_nodes);
                     }
                     rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
                     g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
                     while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
                         local_node->weight = -INFINITY;
                     }
                 }
                 break;
             case recovery_block:
                 pe__clear_resource_flags(rsc, pe_rsc_managed);
                 pe__set_resource_flags(rsc, pe_rsc_block);
 
                 /* If the resource belongs to a group or bundle configured with
                  * multiple-active=block, block the entire entity.
                  */
                 if (rsc->parent
                     && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
                     && rsc->parent->recovery_type == recovery_block) {
                     GList *gIter = rsc->parent->children;
 
                     for (; gIter != NULL; gIter = gIter->next) {
                         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
                         pe__clear_resource_flags(child, pe_rsc_managed);
                         pe__set_resource_flags(child, pe_rsc_block);
                     }
                 }
                 break;
             default: // recovery_stop_start, recovery_stop_unexpected
                 /* The scheduler will do the right thing because the relevant
                  * variables and flags are set when unpacking the history.
                  */
                 break;
         }
         crm_debug("%s is active on multiple nodes including %s: %s",
                   rsc->id, pe__node_name(node),
                   recovery2text(rsc->recovery_type));
 
     } else {
         pe_rsc_trace(rsc, "Resource %s is active on %s",
                      rsc->id, pe__node_name(node));
     }
 
     if (rsc->parent != NULL) {
         native_add_running(rsc->parent, node, data_set, FALSE);
     }
 }
 
 static void
 recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
 {
     pe__clear_resource_flags(rsc, pe_rsc_unique);
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
     g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
 }
 
 gboolean
 native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_resource_t *parent = uber_parent(rsc);
     const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     uint32_t ra_caps = pcmk_get_ra_caps(standard);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     // Only some agent standards support unique and promotable clones
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
         && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
 
         /* @COMPAT We should probably reject this situation as an error (as we
          * do for promotable below) rather than warn and convert, but that would
          * be a backward-incompatible change that we should probably do with a
          * transform at a schema major version bump.
          */
         pe__force_anon(standard, parent, rsc->id, data_set);
 
         /* Clear globally-unique on the parent and all its descendents unpacked
          * so far (clearing the parent should make any future children unpacking
          * correct). We have to clear this resource explicitly because it isn't
          * hooked into the parent's children yet.
          */
         recursive_clear_unique(parent, NULL);
         recursive_clear_unique(rsc, NULL);
     }
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
         && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
 
         pe_err("Resource %s is of type %s and therefore "
                "cannot be used as a promotable clone resource",
                rsc->id, standard);
         return FALSE;
     }
     return TRUE;
 }
 
 static bool
 rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
 {
     pe_rsc_trace(rsc, "Checking whether %s is on %s",
                  rsc->id, pe__node_name(node));
 
     if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
 
         for (GList *iter = rsc->running_on; iter; iter = iter->next) {
             pe_node_t *loc = (pe_node_t *) iter->data;
 
             if (loc->details == node->details) {
                 return true;
             }
         }
 
     } else if (pcmk_is_set(flags, pe_find_inactive)
                && (rsc->running_on == NULL)) {
         return true;
 
     } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
                && (rsc->allocated_to->details == node->details)) {
         return true;
     }
     return false;
 }
 
 pe_resource_t *
 native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
                 int flags)
 {
     bool match = false;
     pe_resource_t *result = NULL;
 
     CRM_CHECK(id && rsc && rsc->id, return NULL);
 
     if (flags & pe_find_clone) {
         const char *rid = ID(rsc->xml);
 
         if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
             match = false;
 
         } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) {
             match = true;
         }
 
     } else if (!strcmp(id, rsc->id)) {
         match = true;
 
     } else if (pcmk_is_set(flags, pe_find_renamed)
                && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
         match = true;
 
     } else if (pcmk_is_set(flags, pe_find_any)
                || (pcmk_is_set(flags, pe_find_anon)
                    && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
         match = pe_base_name_eq(rsc, id);
     }
 
     if (match && on_node) {
         if (!rsc_is_on_node(rsc, on_node, flags)) {
             match = false;
         }
     }
 
     if (match) {
         return rsc;
     }
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         result = rsc->fns->find_rsc(child, id, on_node, flags);
         if (result) {
             return result;
         }
     }
     return NULL;
 }
 
 // create is ignored
 char *
 native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                  pe_working_set_t * data_set)
 {
     char *value_copy = NULL;
     const char *value = NULL;
     GHashTable *params = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
     CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
 
     pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
     params = pe_rsc_params(rsc, node, data_set);
     value = g_hash_table_lookup(params, name);
     if (value == NULL) {
         /* try meta attributes instead */
         value = g_hash_table_lookup(rsc->meta, name);
     }
     pcmk__str_update(&value_copy, value);
     return value_copy;
 }
 
 gboolean
 native_active(pe_resource_t * rsc, gboolean all)
 {
     for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         if (a_node->details->unclean) {
             pe_rsc_trace(rsc, "Resource %s: %s is unclean",
                          rsc->id, pe__node_name(a_node));
             return TRUE;
         } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe_rsc_trace(rsc, "Resource %s: %s is offline",
                          rsc->id, pe__node_name(a_node));
         } else {
             pe_rsc_trace(rsc, "Resource %s active on %s",
                          rsc->id, pe__node_name(a_node));
             return TRUE;
         }
     }
     return FALSE;
 }
 
 struct print_data_s {
     long options;
     void *print_data;
 };
 
 static const char *
 native_pending_state(const pe_resource_t *rsc)
 {
     const char *pending_state = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
         pending_state = "Starting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         pending_state = "Stopping";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         /* A pending migrate_from is also displayed as "Migrating" */
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
         pending_state = "Promoting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
         pending_state = "Demoting";
     }
 
     return pending_state;
 }
 
 static const char *
 native_pending_task(const pe_resource_t *rsc)
 {
     const char *pending_task = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         pending_task = "Monitoring";
 
     /* Pending probes are not printed, even if pending
      * operations are requested. If someone ever requests that
      * behavior, uncomment this and the corresponding part of
      * unpack.c:unpack_rsc_op().
      */
     /*
     } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
         pending_task = "Checking";
     */
     }
 
     return pending_task;
 }
 
 static enum rsc_role_e
 native_displayable_role(const pe_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->role;
 
     if ((role == RSC_ROLE_STARTED)
         && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                        pe_rsc_promotable)) {
 
         role = RSC_ROLE_UNPROMOTED;
     }
     return role;
 }
 
 static const char *
 native_displayable_state(const pe_resource_t *rsc, bool print_pending)
 {
     const char *rsc_state = NULL;
 
     if (print_pending) {
         rsc_state = native_pending_state(rsc);
     }
     if (rsc_state == NULL) {
         rsc_state = role2text(native_displayable_role(rsc));
     }
     return rsc_state;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending));
     const char *target_role = NULL;
 
     /* resource information. */
     status_print("%s<resource ", pre_text);
     status_print(XML_ATTR_ID "=\"%s\" ", rsc_printable_id(rsc));
     status_print("resource_agent=\"%s%s%s:%s\" ", class,
                  ((prov == NULL)? "" : PROVIDER_SEP),
                  ((prov == NULL)? "" : prov),
                  crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     status_print("role=\"%s\" ", rsc_state);
     if (rsc->meta) {
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
     status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
     status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
 
     if (options & pe_print_pending) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             status_print("pending=\"%s\" ", pending_task);
         }
     }
 
     /* print out the nodes this resource is running on */
     if (options & pe_print_rsconly) {
         status_print("/>\n");
         /* don't print the node list */
     } else if (rsc->running_on != NULL) {
         GList *gIter = rsc->running_on;
 
         status_print(">\n");
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             status_print("%s    <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
                          "cached=\"%s\"/>\n",
                          pre_text, pcmk__s(node->details->uname, ""),
                          node->details->id, pcmk__btoa(!node->details->online));
         }
         status_print("%s</resource>\n", pre_text);
     } else {
         status_print("/>\n");
     }
 }
 
 // Append a flag to resource description string's flags list
 static bool
 add_output_flag(GString *s, const char *flag_desc, bool have_flags)
 {
     g_string_append(s, (have_flags? ", " : " ("));
     g_string_append(s, flag_desc);
     return true;
 }
 
 // Append a node name to resource description string's node list
 static bool
 add_output_node(GString *s, const char *node, bool have_nodes)
 {
     g_string_append(s, (have_nodes? " " : " [ "));
     g_string_append(s, node);
     return true;
 }
 
 /*!
  * \internal
  * \brief Create a string description of a resource
  *
  * \param[in] rsc          Resource to describe
  * \param[in] name         Desired identifier for the resource
  * \param[in] node         If not NULL, node that resource is "on"
  * \param[in] show_opts    Bitmask of pcmk_show_opt_e
  * \param[in] target_role  Resource's target role
  * \param[in] show_nodes   Whether to display nodes when multiply active
  *
  * \return Newly allocated string description of resource
  * \note Caller must free the result with g_free().
  */
 gchar *
 pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
                            const pe_node_t *node, uint32_t show_opts,
                            const char *target_role, bool show_nodes)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *provider = NULL;
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     GString *outstr = NULL;
     bool have_flags = false;
 
     if (rsc->variant != pe_native) {
         return NULL;
     }
 
     CRM_CHECK(name != NULL, name = "unknown");
     CRM_CHECK(kind != NULL, kind = "unknown");
     CRM_CHECK(class != NULL, class = "unknown");
 
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
         provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     }
 
     if ((node == NULL) && (rsc->lock_node != NULL)) {
         node = rsc->lock_node;
     }
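     // If no node was given, show the shutdown-lock node (if any)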
-    if (pcmk_is_set(show_opts, pcmk_show_rsc_only)
+    if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only)
         || pcmk__list_of_multiple(rsc->running_on)) {
         node = NULL;
     }
 
     outstr = g_string_sized_new(128);
 
     // Resource name and agent
     pcmk__g_strcat(outstr,
                    name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP),
                    pcmk__s(provider, ""), ":", kind, "):\t", NULL);
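     /* For example, a resource named "dummy" using ocf:pacemaker:Dummy yields
      * "dummy\t(ocf:pacemaker:Dummy):\t" at this point (illustrative)
      */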
 
     // State on node
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         g_string_append(outstr, " ORPHANED");
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         enum rsc_role_e role = native_displayable_role(rsc);
 
         g_string_append(outstr, " FAILED");
         if (role > RSC_ROLE_UNPROMOTED) {
             pcmk__add_word(&outstr, 0, role2text(role));
         }
     } else {
         bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending);
 
         pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending));
     }
     if (node) {
         pcmk__add_word(&outstr, 0, pe__node_name(node));
     }
 
     // Failed probe operation
     if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) {
         xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
         if (probe_op != NULL) {
             int rc;
 
             pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0);
             pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ",
                            NULL);
         }
     }
 
     // Flags, as: (<flag> [...])
     if (node && !(node->details->online) && node->details->unclean) {
         have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
     }
     if (node && (node == rsc->lock_node)) {
         have_flags = add_output_flag(outstr, "LOCKED", have_flags);
     }
     if (pcmk_is_set(show_opts, pcmk_show_pending)) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             have_flags = add_output_flag(outstr, pending_task, have_flags);
         }
     }
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         /* Only show target role if it limits our abilities (i.e. ignore
          * Started, as it is the default anyway, and doesn't prevent the
          * resource from becoming promoted).
          */
         if (target_role_e == RSC_ROLE_STOPPED) {
             have_flags = add_output_flag(outstr, "disabled", have_flags);
 
         } else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                                pe_rsc_promotable)
                    && target_role_e == RSC_ROLE_UNPROMOTED) {
             have_flags = add_output_flag(outstr, "target-role:", have_flags);
             g_string_append(outstr, target_role);
         }
     }
 
     // Blocked or maintenance implies unmanaged
     if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
             have_flags = add_output_flag(outstr, "blocked", have_flags);
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
             have_flags = add_output_flag(outstr, "maintenance", have_flags);
         }
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         have_flags = add_output_flag(outstr, "unmanaged", have_flags);
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         have_flags = add_output_flag(outstr, "failure ignored", have_flags);
     }
+
     if (have_flags) {
         g_string_append_c(outstr, ')');
     }
 
     // User-supplied description
-    if (pcmk_is_set(show_opts, pcmk_show_rsc_only)
+    if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
         || pcmk__list_of_multiple(rsc->running_on)) {
         const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
 
         if (desc) {
-            pcmk__add_word(&outstr, 0, desc);
+            g_string_append(outstr, " (");
+            g_string_append(outstr, desc);
+            g_string_append(outstr, ")");
         }
     }
 
     if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
         && pcmk__list_of_multiple(rsc->running_on)) {
         bool have_nodes = false;
 
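         // List the nodes as " [ node1 node2 ]" (closing " ]" added below)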
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pe_node_t *n = (pe_node_t *) iter->data;
 
             have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
         }
         if (have_nodes) {
             g_string_append(outstr, " ]");
         }
     }
 
     return g_string_free(outstr, FALSE);
 }
 
 int
 pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
                        const char *name, const pe_node_t *node,
                        uint32_t show_opts)
 {
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *target_role = NULL;
 
     xmlNodePtr list_node = NULL;
     const char *cl = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     CRM_ASSERT(kind != NULL);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         cl = "rsc-managed";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         cl = "rsc-failed";
 
     } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
         cl = "rsc-failed";
 
     } else if (pcmk__list_of_multiple(rsc->running_on)) {
         cl = "rsc-multiple";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         cl = "rsc-failure-ignored";
 
     } else {
         cl = "rsc-ok";
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
                                               target_role, true);
 
         list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
         pcmk_create_html_node(list_node, "span", NULL, cl, s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 int
 pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
                        const char *name, const pe_node_t *node,
                        uint32_t show_opts)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
                                               target_role, true);
 
         out->list_item(out, NULL, "%s", s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
              const pe_node_t *node, long options, void *print_data)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta,
                                                       XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     if ((pre_text == NULL) && (options & pe_print_printf)) {
         pre_text = " ";
     }
 
     if (options & pe_print_html) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             status_print("<font color=\"yellow\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             status_print("<font color=\"red\">");
 
         } else if (rsc->running_on == NULL) {
             status_print("<font color=\"red\">");
 
         } else if (pcmk__list_of_multiple(rsc->running_on)) {
             status_print("<font color=\"orange\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
             status_print("<font color=\"yellow\">");
 
         } else {
             status_print("<font color=\"green\">");
         }
     }
 
     {
         gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
                                                        target_role, false);
         status_print("%s%s", (pre_text? pre_text : ""), resource_s);
         g_free(resource_s);
     }
 
     if (pcmk_is_set(options, pe_print_html)) {
         status_print(" </font> ");
     }
 
     if (!pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
 
         GList *gIter = rsc->running_on;
         int counter = 0;
 
         if (options & pe_print_html) {
             status_print("<ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print("[");
         }
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *n = (pe_node_t *) gIter->data;
 
             counter++;
 
             if (options & pe_print_html) {
                 status_print("<li>\n%s", pe__node_name(n));
 
             } else if ((options & pe_print_printf)
                        || (options & pe_print_ncurses)) {
                 status_print(" %s", pe__node_name(n));
 
             } else if ((options & pe_print_log)) {
                 status_print("\t%d : %s", counter, pe__node_name(n));
 
             } else {
                 status_print("%s", pe__node_name(n));
             }
             if (options & pe_print_html) {
                 status_print("</li>\n");
 
             }
         }
 
         if (options & pe_print_html) {
             status_print("</ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print(" ]");
         }
     }
 
     if (options & pe_print_html) {
         status_print("<br/>\n");
     } else if (options & pe_print_suppres_nl) {
         /* nothing */
     } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
         status_print("\n");
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 native_print(pe_resource_t *rsc, const char *pre_text, long options,
              void *print_data)
 {
     const pe_node_t *node = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     node = pe__current_node(rsc);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
 
     common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, print_pending);
 
+    const char *desc = NULL;
     char ra_name[LINE_MAX];
     char *nodes_running_on = NULL;
     const char *lock_node_name = NULL;
     int rc = pcmk_rc_no_output;
     const char *target_role = NULL;
 
+    desc = pe__resource_description(rsc, show_opts);
+
     if (rsc->meta != NULL) {
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     /* resource information. */
     snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
             ((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
             crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
 
     if (rsc->lock_node != NULL) {
         lock_node_name = rsc->lock_node->details->uname;
     }
 
-    rc = pe__name_and_nvpairs_xml(out, true, "resource", 14,
+    rc = pe__name_and_nvpairs_xml(out, true, "resource", 15,
              "id", rsc_printable_id(rsc),
              "resource_agent", ra_name,
              "role", rsc_state,
              "target_role", target_role,
              "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
              "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
              "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
              "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
              "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
              "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
              "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
              "nodes_running_on", nodes_running_on,
              "pending", (print_pending? native_pending_task(rsc) : NULL),
-             "locked_to", lock_node_name);
+             "locked_to", lock_node_name,
+             "description", desc);
     free(nodes_running_on);
 
     CRM_ASSERT(rc == pcmk_rc_ok);
 
     if (rsc->running_on != NULL) {
         GList *gIter = rsc->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
                      "name", node->details->uname,
                      "id", node->details->id,
                      "cached", pcmk__btoa(node->details->online));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
     }
 
     pcmk__output_xml_pop_parent(out);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_html(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const pe_node_t *node = pe__current_node(rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_text(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const pe_node_t *node = pe__current_node(rsc);
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
 }
 
 void
 native_free(pe_resource_t * rsc)
 {
     pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
     common_free(rsc);
 }
 
 enum rsc_role_e
 native_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e role = rsc->next_role;
 
     if (current) {
         role = rsc->role;
     }
     pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
     return role;
 }
 
 /*!
  * \internal
  * \brief List nodes where a resource (or any of its children) is
  *
  * \param[in]  rsc      Resource to check
  * \param[out] list     List to add result to
  * \param[in]  current  0 = where allocated, 1 = where running,
  *                      2 = where running or pending
  *
  * \return If list contains only one node, that node, or NULL otherwise
  */
 pe_node_t *
 native_location(const pe_resource_t *rsc, GList **list, int current)
 {
     pe_node_t *one = NULL;
     GList *result = NULL;
 
     if (rsc->children) {
         GList *gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             child->fns->location(child, &result, current);
         }
 
     } else if (current) {
 
         if (rsc->running_on) {
             result = g_list_copy(rsc->running_on);
         }
         if ((current == 2) && rsc->pending_node
             && !pe_find_node_id(result, rsc->pending_node->details->id)) {
                 result = g_list_append(result, rsc->pending_node);
         }
 
     } else if (current == FALSE && rsc->allocated_to) {
         result = g_list_append(NULL, rsc->allocated_to);
     }
 
     if (result && (result->next == NULL)) {
         one = result->data;
     }
 
     if (list) {
         GList *gIter = result;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
                 *list = g_list_append(*list, node);
             }
         }
     }
 
     g_list_free(result);
     return one;
 }
 
 static void
 get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
 {
     GList *gIter = rsc_list;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
         int offset = 0;
         char buffer[LINE_MAX];
 
         int *rsc_counter = NULL;
         int *active_counter = NULL;
 
         if (rsc->variant != pe_native) {
             continue;
         }
 
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
         if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
             const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
 
             if (prov != NULL) {
                 offset += snprintf(buffer + offset, LINE_MAX - offset,
                                    PROVIDER_SEP "%s", prov);
             }
         }
         offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
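         // buffer now holds the agent specification, e.g. "ocf:pacemaker:Dummy"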
         CRM_LOG_ASSERT(offset > 0);
 
         if (rsc_table) {
             rsc_counter = g_hash_table_lookup(rsc_table, buffer);
             if (rsc_counter == NULL) {
                 rsc_counter = calloc(1, sizeof(int));
                 *rsc_counter = 0;
                 g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
             }
             (*rsc_counter)++;
         }
 
         if (active_table) {
             GList *gIter2 = rsc->running_on;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 pe_node_t *node = (pe_node_t *) gIter2->data;
                 GHashTable *node_table = NULL;
 
                 if (node->details->unclean == FALSE && node->details->online == FALSE &&
                     pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                     continue;
                 }
 
                 node_table = g_hash_table_lookup(active_table, node->details->uname);
                 if (node_table == NULL) {
                     node_table = pcmk__strkey_table(free, free);
                     g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                 }
 
                 active_counter = g_hash_table_lookup(node_table, buffer);
                 if (active_counter == NULL) {
                     active_counter = calloc(1, sizeof(int));
                     *active_counter = 0;
                     g_hash_table_insert(node_table, strdup(buffer), active_counter);
                 }
                 (*active_counter)++;
             }
         }
     }
 }
 
 static void
 destroy_node_table(gpointer data)
 {
     GHashTable *node_table = data;
 
     if (node_table) {
         g_hash_table_destroy(node_table);
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
                  void *print_data, gboolean print_all)
 {
     GHashTable *rsc_table = pcmk__strkey_table(free, free);
     GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
     GHashTableIter hash_iter;
     char *type = NULL;
     int *rsc_counter = NULL;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     g_hash_table_iter_init(&hash_iter, rsc_table);
     while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             if (print_all) {
                 status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0,
                              rsc_counter ? *rsc_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             }
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
 
         if (print_all && active_counter_all == 0) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                          active_counter_all,
                          rsc_counter ? *rsc_counter : 0, type);
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
 }
 
 int
 pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
 {
     GHashTable *rsc_table = pcmk__strkey_table(free, free);
     GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
     GList *sorted_rscs;
     int rc = pcmk_rc_no_output;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     /* Make a list of the rsc_table keys so that it can be sorted.  This is to make sure
      * output order stays consistent between systems.
      */
     sorted_rscs = g_hash_table_get_keys(rsc_table);
     sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
 
     for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
         char *type = (char *) gIter->data;
         int *rsc_counter = g_hash_table_lookup(rsc_table, type);
 
         GList *sorted_nodes = NULL;
         int active_counter_all = 0;
 
         /* Also make a list of the active_table keys so it can be sorted.  If there's
          * more than one instance of a type of resource running, we need the nodes to
          * be sorted to make sure output order stays consistent between systems.
          */
         sorted_nodes = g_hash_table_get_keys(active_table);
         sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);
 
         for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
             char *node_name = (char *) gIter2->data;
             GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
             int *active_counter = NULL;
 
             if (node_table == NULL) {
                 continue;
             }
 
             active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
                 node_name = NULL;
             }
 
             if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                 out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                                *active_counter,
                                rsc_counter ? *rsc_counter : 0, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                                *active_counter, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             }
 
             rc = pcmk_rc_ok;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
             out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                            active_counter_all,
                            rsc_counter ? *rsc_counter : 0, type);
             rc = pcmk_rc_ok;
         }
 
         if (sorted_nodes) {
             g_list_free(sorted_nodes);
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
     if (sorted_rscs) {
         g_list_free(sorted_rscs);
     }
 
     return rc;
 }
 
 gboolean
 pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                        gboolean check_parent)
 {
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
         pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
         return FALSE;
     } else if (check_parent && rsc->parent) {
         const pe_resource_t *up = pe__const_top_resource(rsc, true);
 
         return up->fns->is_filtered(up, only_rsc, FALSE);
     }
 
     return TRUE;
 }
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 921ddf7c63..46c76fbd93 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,3101 +1,3113 @@
 /*
  * Copyright 2019-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <stdint.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/output.h>
 #include <crm/cib/util.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 
+const char *
+pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
+{
+    const char *desc = NULL;
+    // User-supplied description
+    if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
+        || pcmk__list_of_multiple(rsc->running_on)) {
+        desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
+    }
+    return desc;
+}
+
 /* Never display node attributes whose name starts with one of these prefixes */
 #define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX,   \
                      "shutdown", "terminate", "standby", "#", NULL }
 
 static int
 compare_attribute(gconstpointer a, gconstpointer b)
 {
     int rc;
 
     rc = strcmp((const char *)a, (const char *)b);
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Determine whether extended information about an attribute should be added.
  *
  * \param[in]     node            Node that ran this resource
  * \param[in,out] rsc_list        List of resources for this node
  * \param[in,out] data_set        Cluster working set
  * \param[in]     attrname        Attribute to find
  * \param[out]    expected_score  Expected value for this attribute
  *
  * \return true if extended information should be printed, false otherwise
  * \note Currently, extended information is only supported for ping/pingd
  *       resources, for which a message will be printed if connectivity is lost
  *       or degraded.
  */
 static bool
 add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
                const char *attrname, int *expected_score)
 {
     GList *gIter = NULL;
 
     for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         const char *type = g_hash_table_lookup(rsc->meta, "type");
         const char *name = NULL;
         GHashTable *params = NULL;
 
         if (rsc->children != NULL) {
             if (add_extra_info(node, rsc->children, data_set, attrname,
                                expected_score)) {
                 return true;
             }
         }
 
         if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
             continue;
         }
 
         params = pe_rsc_params(rsc, node, data_set);
         name = g_hash_table_lookup(params, "name");
 
         if (name == NULL) {
             name = "pingd";
         }
 
         /* Match the resource whose "name" parameter equals the requested
          * attribute name
          */
         if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
             int host_list_num = 0;
             const char *hosts = g_hash_table_lookup(params, "host_list");
             const char *multiplier = g_hash_table_lookup(params, "multiplier");
             int multiplier_i;
 
             if (hosts) {
                 char **host_list = g_strsplit(hosts, " ", 0);
                 host_list_num = g_strv_length(host_list);
                 g_strfreev(host_list);
             }
 
             if ((multiplier == NULL)
                 || (pcmk__scan_min_int(multiplier, &multiplier_i,
                                        INT_MIN) != pcmk_rc_ok)) {
                 /* The ocf:pacemaker:ping resource agent defaults multiplier to
                  * 1. The agent currently does not handle invalid text, but it
                  * should, and this would be a reasonable choice ...
                  */
                 multiplier_i = 1;
             }
             *expected_score = host_list_num * multiplier_i;
 
             return true;
         }
     }
     return false;
 }
 
 static GList *
 filter_attr_list(GList *attr_list, char *name)
 {
     int i;
     const char *filt_str[] = FILTER_STR;
 
     CRM_CHECK(name != NULL, return attr_list);
 
     /* filtering automatic attributes */
     for (i = 0; filt_str[i] != NULL; i++) {
         if (g_str_has_prefix(name, filt_str[i])) {
             return attr_list;
         }
     }
 
     return g_list_insert_sorted(attr_list, name, compare_attribute);
 }
 
 static GList *
 get_operation_list(xmlNode *rsc_entry) {
     GList *op_list = NULL;
     xmlNode *rsc_op = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
         const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(rsc_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
         int op_rc_i;
 
         pcmk__scan_min_int(op_rc, &op_rc_i, 0);
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* Ignore notifies and some probes */
         if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
             continue;
         }
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
             op_list = g_list_append(op_list, rsc_op);
         }
     }
 
     op_list = g_list_sort(op_list, sort_op_by_callid);
     return op_list;
 }
 
 static void
 add_dump_node(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNodePtr node = user_data;
     pcmk_create_xml_text_node(node, (const char *) key, (const char *) value);
 }
 
 static void
 append_dump_text(gpointer key, gpointer value, gpointer user_data)
 {
     char **dump_text = user_data;
     char *new_text = crm_strdup_printf("%s %s=%s",
                                        *dump_text, (char *)key, (char *)value);
 
     free(*dump_text);
     *dump_text = new_text;
 }
 
 static const char *
 get_cluster_stack(pe_working_set_t *data_set)
 {
     xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
                                       data_set->input, LOG_DEBUG);
     return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
 }
 
 static char *
 last_changed_string(const char *last_written, const char *user,
                     const char *client, const char *origin) {
     if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
         return crm_strdup_printf("%s%s%s%s%s%s%s",
                                  last_written ? last_written : "",
                                  user ? " by " : "",
                                  user ? user : "",
                                  client ? " via " : "",
                                  client ? client : "",
                                  origin ? " on " : "",
                                  origin ? origin : "");
     } else {
         return strdup("");
     }
 }
 
 static char *
 op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
                   int rc, bool print_timing) {
     const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     char *interval_str = NULL;
     char *buf = NULL;
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *pair = pcmk__format_nvpair("interval", interval_ms_s, "ms");
         interval_str = crm_strdup_printf(" %s", pair);
         free(pair);
     }
 
     if (print_timing) {
         char *last_change_str = NULL;
         char *exec_str = NULL;
         char *queue_str = NULL;
 
         const char *value = NULL;
 
         time_t epoch = 0;
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
             && (epoch > 0)) {
             char *epoch_str = pcmk__epoch2str(&epoch, 0);
 
             last_change_str = crm_strdup_printf(" %s=\"%s\"",
                                                 XML_RSC_OP_LAST_CHANGE,
                                                 pcmk__s(epoch_str, ""));
             free(epoch_str);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *pair = pcmk__format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
             exec_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *pair = pcmk__format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
             queue_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         buf = crm_strdup_printf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task,
                                 interval_str ? interval_str : "",
                                 last_change_str ? last_change_str : "",
                                 exec_str ? exec_str : "",
                                 queue_str ? queue_str : "",
                                 rc, services_ocf_exitcode_str(rc));
 
         if (last_change_str) {
             free(last_change_str);
         }
 
         if (exec_str) {
             free(exec_str);
         }
 
         if (queue_str) {
             free(queue_str);
         }
     } else {
         buf = crm_strdup_printf("(%s) %s%s%s", call, task,
                                 interval_str ? ":" : "",
                                 interval_str ? interval_str : "");
     }
 
     if (interval_str) {
         free(interval_str);
     }
 
     return buf;
 }
 
 static char *
 resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
                         int failcount, time_t last_failure) {
     char *buf = NULL;
 
     if (rsc == NULL) {
         buf = crm_strdup_printf("%s: orphan", rsc_id);
     } else if (all || failcount || last_failure > 0) {
         char *failcount_s = NULL;
         char *lastfail_s = NULL;
 
         if (failcount > 0) {
             failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
                                             failcount);
         } else {
             failcount_s = strdup("");
         }
         if (last_failure > 0) {
             buf = pcmk__epoch2str(&last_failure, 0);
             lastfail_s = crm_strdup_printf(" %s='%s'",
                                            PCMK__LAST_FAILURE_PREFIX, buf);
             free(buf);
         }
 
         buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
                                 rsc_id, rsc->migration_threshold, failcount_s,
                                 lastfail_s? lastfail_s : "");
         free(failcount_s);
         free(lastfail_s);
     } else {
         buf = crm_strdup_printf("%s:", rsc_id);
     }
 
     return buf;
 }
 
 static const char *
 get_node_feature_set(pe_node_t *node) {
     const char *feature_set = NULL;
 
     if (node->details->online && !pe__is_guest_or_remote_node(node)) {
         feature_set = g_hash_table_lookup(node->details->attrs,
                                           CRM_ATTR_FEATURE_SET);
         /* The feature set attribute has been present since 3.15.1; if it is
          * missing, the node must be running an earlier version. */
         if (feature_set == NULL) {
             feature_set = "<3.15.1";
         }
     }
     return feature_set;
 }
 
 static bool
 is_mixed_version(pe_working_set_t *data_set) {
     const char *feature_set = NULL;
     for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = gIter->data;
         const char *node_feature_set = get_node_feature_set(node);
         if (node_feature_set != NULL) {
             if (feature_set == NULL) {
                 feature_set = node_feature_set;
             } else if (strcmp(feature_set, node_feature_set) != 0) {
                 return true;
             }
         }
     }
     return false;
 }
 
 static char *
 formatted_xml_buf(pe_resource_t *rsc, bool raw)
 {
     if (raw) {
         return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
     } else {
         return dump_xml_formatted(rsc->xml);
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
                   "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
 static int
 cluster_summary(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (pcmk_is_set(section_opts, pcmk_section_stack)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s, pcmkd_state);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_dc)) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
         bool mixed_version = is_mixed_version(data_set);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum,
                      dc_version_s, dc_name, mixed_version);
         free(dc_name);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_times)) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-times",
                      data_set->localhost, last_written, user, client, origin);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_counts)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_options)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
         if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
                   "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
 static int
 cluster_summary_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (pcmk_is_set(section_opts, pcmk_section_stack)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s, pcmkd_state);
     }
 
     /* Always print the DC section if there is no DC, even if not requested */
     if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
         bool mixed_version = is_mixed_version(data_set);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum,
                      dc_version_s, dc_name, mixed_version);
         free(dc_name);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_times)) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-times",
                      data_set->localhost, last_written, user, client, origin);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_counts)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_options)) {
         /* Kind of a hack - close the list we may have opened earlier in this
          * function so we can put all the options into their own list.  We
          * only want to do this on HTML output, though.
          */
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
         out->begin_list(out, NULL, NULL, "Config Options");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
         if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     return rc;
 }
 
 char *
 pe__node_display_name(pe_node_t *node, bool print_detail)
 {
     char *node_name;
     const char *node_host = NULL;
     const char *node_id = NULL;
     int name_len;
 
     CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
 
     /* Host is displayed only if this is a guest node and detail is requested */
     if (print_detail && pe__is_guest_node(node)) {
         const pe_resource_t *container = node->details->remote_rsc->container;
         const pe_node_t *host_node = pe__current_node(container);
 
         if (host_node && host_node->details) {
             node_host = host_node->details->uname;
         }
         if (node_host == NULL) {
             node_host = ""; /* so we at least get "uname@" to indicate guest */
         }
     }
 
     /* Node ID is displayed if different from uname and detail is requested */
     if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
         node_id = node->details->id;
     }
 
     /* Determine name length */
     name_len = strlen(node->details->uname) + 1;
     if (node_host) {
         name_len += strlen(node_host) + 1; /* "@node_host" */
     }
     if (node_id) {
         name_len += strlen(node_id) + 3; /* + " (node_id)" */
     }
 
     /* Allocate and populate display name */
     node_name = malloc(name_len);
     CRM_ASSERT(node_name != NULL);
     strcpy(node_name, node->details->uname);
     if (node_host) {
         strcat(node_name, "@");
         strcat(node_name, node_host);
     }
     if (node_id) {
         strcat(node_name, " (");
         strcat(node_name, node_id);
         strcat(node_name, ")");
     }
     return node_name;
 }
 
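  /*!
   * \internal
   * \brief Create an output XML element with attributes from name/value pairs
   *
   * \param[in,out] out          Output object
   * \param[in]     is_list      If true, push the new element as the current
   *                             list parent (caller is expected to end it)
   * \param[in]     tag_name     Name of new XML element
   * \param[in]     pairs_count  Number of name/value pairs that follow
   * \param[in]     ...          \p pairs_count pairs of attribute name and
   *                             value strings (pairs with a NULL name or
   *                             value are skipped)
   *
   * \return Standard Pacemaker return code
   */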
 int
  pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                           const char *tag_name, size_t pairs_count, ...)
 {
     xmlNodePtr xml_node = NULL;
     va_list args;
 
     CRM_ASSERT(tag_name != NULL);
 
     xml_node = pcmk__output_xml_peek_parent(out);
     CRM_ASSERT(xml_node != NULL);
     xml_node = is_list
         ? create_xml_node(xml_node, tag_name)
         : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
 
     va_start(args, pairs_count);
      while (pairs_count--) {
          const char *param_name = va_arg(args, const char *);
          const char *param_value = va_arg(args, const char *);

          if (param_name && param_value) {
              crm_xml_add(xml_node, param_name, param_value);
          }
      }
     va_end(args);
 
     if (is_list) {
         pcmk__output_xml_push_parent(out, xml_node);
     }
     return pcmk_rc_ok;
 }
 
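  /*!
   * \internal
   * \brief Describe a ban's role restriction for display
   *
   * \param[in] role  Role that the ban is limited to
   *
   * \return "in <promoted role> role " (or the legacy equivalent) if \p role
   *         is the promoted role, otherwise an empty string
   */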
 static const char *
 role_desc(enum rsc_role_e role)
 {
     if (role == RSC_ROLE_PROMOTED) {
 #ifdef PCMK__COMPAT_2_0
         return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
 #else
         return "in " RSC_ROLE_PROMOTED_S " role ";
 #endif
     }
     return "";
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_html(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *node_name = pe__node_display_name(pe_node,
                                             pcmk_is_set(show_opts, pcmk_show_node_id));
     char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
                                   location->id, location->rsc_lh->id,
                                   role_desc(location->role_filter), node_name);
 
     pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
 
     free(node_name);
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_text(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *node_name = pe__node_display_name(pe_node,
                                             pcmk_is_set(show_opts, pcmk_show_node_id));
     out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
                    location->id, location->rsc_lh->id,
                    role_desc(location->role_filter), node_name);
 
     free(node_name);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
 
     const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
     char *weight_s = pcmk__itoa(pe_node->weight);
 
     pcmk__output_create_xml_node(out, "ban",
                                  "id", location->id,
                                  "resource", location->rsc_lh->id,
                                  "node", pe_node->details->uname,
                                  "weight", weight_s,
                                  "promoted-only", promoted_only,
                                  /* This is a deprecated alias for
                                   * promoted_only. Removing it will break
                                   * backward compatibility of the API schema,
                                   * which will require an API schema major
                                   * version bump.
                                   */
                                  "master_only", promoted_only,
                                  NULL);
 
     free(weight_s);
     return pcmk_rc_ok;
 }
 
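  // Print a section listing all bans (negative location constraints) that
  // match the given ID prefix and resource filters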
 PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
                   "uint32_t", "bool")
 static int
 ban_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     const char *prefix = va_arg(args, const char *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     GList *gIter, *gIter2;
     int rc = pcmk_rc_no_output;
 
     /* Print each ban */
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *location = gIter->data;
         const pe_resource_t *rsc = location->rsc_lh;
 
         if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
             continue;
         }
 
         if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
                                pcmk__str_star_matches)
             && !pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)),
                                   only_rsc, pcmk__str_star_matches)) {
             continue;
         }
 
         for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_node_t *node = (pe_node_t *) gIter2->data;
 
             if (node->weight < 0) {
                 PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
                 out->message(out, "ban", node, location, show_opts);
             }
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_html(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
     xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
 
     char *nnodes_str = crm_strdup_printf("%d node%s configured",
                                          nnodes, pcmk__plural_s(nnodes));
 
     pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
     free(nnodes_str);
 
     if (ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
 
         s = crm_strdup_printf(", %d ", nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else if (ndisabled && !nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
     } else if (!ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else {
         char *s = crm_strdup_printf("%d resource instance%s configured",
                                     nresources, pcmk__plural_s(nresources));
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_text(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     out->list_item(out, NULL, "%d node%s configured",
                    nnodes, pcmk__plural_s(nnodes));
 
     if (ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED, %d BLOCKED from "
                                   "further action due to failure)",
                        nresources, pcmk__plural_s(nresources), ndisabled,
                        nblocked);
     } else if (ndisabled && !nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED)",
                        nresources, pcmk__plural_s(nresources), ndisabled);
     } else if (!ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d BLOCKED from further action "
                                   "due to failure)",
                        nresources, pcmk__plural_s(nresources), nblocked);
     } else {
         out->list_item(out, NULL, "%d resource instance%s configured",
                        nresources, pcmk__plural_s(nresources));
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_xml(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
     xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
 
     char *s = pcmk__itoa(nnodes);
     crm_xml_add(nodes_node, "number", s);
     free(s);
 
     s = pcmk__itoa(nresources);
     crm_xml_add(resources_node, "number", s);
     free(s);
 
     s = pcmk__itoa(ndisabled);
     crm_xml_add(resources_node, "disabled", s);
     free(s);
 
     s = pcmk__itoa(nblocked);
     crm_xml_add(resources_node, "blocked", s);
     free(s);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_html(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
     pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
 
     if (dc) {
         char *buf = crm_strdup_printf("%s (version %s) -", dc_name,
                                       dc_version_s ? dc_version_s : "unknown");
         pcmk_create_html_node(node, "span", NULL, NULL, buf);
         free(buf);
 
         if (mixed_version) {
             pcmk_create_html_node(node, "span", NULL, "warning",
                                   " MIXED-VERSION");
         }
         pcmk_create_html_node(node, "span", NULL, NULL, " partition");
         if (crm_is_true(quorum)) {
             pcmk_create_html_node(node, "span", NULL, NULL, " with");
         } else {
             pcmk_create_html_node(node, "span", NULL, "warning", " WITHOUT");
         }
         pcmk_create_html_node(node, "span", NULL, NULL, " quorum");
     } else {
         pcmk_create_html_node(node, "span", NULL, "warning", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_text(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     if (dc) {
         out->list_item(out, "Current DC",
                        "%s (version %s) - %spartition %s quorum",
                        dc_name, dc_version_s ? dc_version_s : "unknown",
                        mixed_version ? "MIXED-VERSION " : "",
                        crm_is_true(quorum) ? "with" : "WITHOUT");
     } else {
         out->list_item(out, "Current DC", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     if (dc) {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "true",
                                      "version", dc_version_s ? dc_version_s : "",
                                      "name", dc->details->uname,
                                      "id", dc->details->id,
                                      "with_quorum", pcmk__btoa(crm_is_true(quorum)),
                                      "mixed_version", pcmk__btoa(mixed_version),
                                      NULL);
     } else {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "false",
                                      NULL);
     }
 
     return pcmk_rc_ok;
 }
 
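  // Print a text-format banner when resource management is disabled
  // (maintenance mode or stop-all-resources); otherwise print nothing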
 PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
 static int
 cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
     unsigned long long flags = va_arg(args, unsigned long long);
 
     if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
         pcmk__formatted_printf(out, "\n              *** Resource management is DISABLED ***\n");
         pcmk__formatted_printf(out, "  The cluster will not attempt to start, stop or recover services\n");
         return pcmk_rc_ok;
     } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
         pcmk__formatted_printf(out, "\n    *** Resource management is DISABLED ***\n");
         pcmk__formatted_printf(out, "  The cluster will keep all resources stopped\n");
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will not attempt to start, stop, or recover services)");
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will keep all resources stopped)");
     } else {
         out->list_item(out, NULL, "Resource management: enabled");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_log(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         return out->info(out, "Resource management is DISABLED.  The cluster will not attempt to start, stop or recover services.");
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         return out->info(out, "Resource management is DISABLED.  The cluster has stopped all resources.");
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_text(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     const char *no_quorum_policy = NULL;
     char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
     char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             no_quorum_policy = "freeze";
             break;
 
         case no_quorum_stop:
             no_quorum_policy = "stop";
             break;
 
         case no_quorum_demote:
             no_quorum_policy = "demote";
             break;
 
         case no_quorum_ignore:
             no_quorum_policy = "ignore";
             break;
 
         case no_quorum_suicide:
             no_quorum_policy = "suicide";
             break;
     }
 
     pcmk__output_create_xml_node(out, "cluster_options",
                                  "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
                                  "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
                                  "no-quorum-policy", no_quorum_policy,
                                  "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
                                  "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
                                  "stonith-timeout-ms", stonith_timeout_str,
                                  "priority-fencing-delay-ms", priority_fencing_delay_str,
                                  NULL);
     free(stonith_timeout_str);
     free(priority_fencing_delay_str);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_html(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
     pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
     pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         pcmk_create_html_node(node, "span", NULL, NULL, " (");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               pcmk__pcmkd_state_enum2friendly(pcmkd_state));
         pcmk_create_html_node(node, "span", NULL, NULL, ")");
     }
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_text(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         out->list_item(out, "Stack", "%s (%s)",
                        stack_s, pcmk__pcmkd_state_enum2friendly(pcmkd_state));
     } else {
         out->list_item(out, "Stack", "%s", stack_s);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_xml(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     const char *state_s = NULL;
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         state_s = pcmk_pacemakerd_api_daemon_state_enum2text(pcmkd_state);
     }
 
     pcmk__output_create_xml_node(out, "stack",
                                  "type", stack_s,
                                  "pacemakerd-state", state_s,
                                  NULL);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_html(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
     xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
     pcmk_create_html_node(updated_node, "span", NULL, NULL, time_s);
 
     if (our_nodename != NULL) {
         pcmk_create_html_node(updated_node, "span", NULL, NULL, " on ");
         pcmk_create_html_node(updated_node, "span", NULL, NULL, our_nodename);
     }
 
     free(time_s);
     time_s = last_changed_string(last_written, user, client, origin);
 
     pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
     pcmk_create_html_node(changed_node, "span", NULL, NULL, time_s);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_xml(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     pcmk__output_create_xml_node(out, "last_update",
                                  "time", time_s,
                                  "origin", our_nodename,
                                  NULL);
 
     pcmk__output_create_xml_node(out, "last_change",
                                  "time", last_written ? last_written : "",
                                  "user", user ? user : "",
                                  "client", client ? client : "",
                                  "origin", origin ? origin : "",
                                  NULL);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_text(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     out->list_item(out, "Last updated", "%s%s%s",
                    time_s, (our_nodename != NULL)? " on " : "",
                    pcmk__s(our_nodename, ""));
 
     free(time_s);
     time_s = last_changed_string(last_written, user, client, origin);
 
     out->list_item(out, "Last change", " %s", time_s);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Display a failed action in less-technical natural language
  *
  * \param[in,out] out          Output object to use for display
  * \param[in]     xml_op       XML containing failed action
  * \param[in]     op_key       Operation key of failed action
  * \param[in]     node_name    Where failed action occurred
  * \param[in]     rc           OCF exit code of failed action
  * \param[in]     status       Execution status of failed action
  * \param[in]     exit_reason  Exit reason given for failed action
  * \param[in]     exec_time    String containing execution time in milliseconds
  */
 static void
 failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
                        const char *op_key, const char *node_name, int rc,
                        int status, const char *exit_reason,
                        const char *exec_time)
 {
     char *rsc_id = NULL;
     char *task = NULL;
     guint interval_ms = 0;
     time_t last_change_epoch = 0;
     GString *str = NULL;
 
     if (pcmk__str_empty(op_key)
         || !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) {
         rsc_id = strdup("unknown resource");
         task = strdup("unknown action");
         interval_ms = 0;
     }
     CRM_ASSERT((rsc_id != NULL) && (task != NULL));
 
     str = g_string_sized_new(256); // Should be sufficient for most messages
 
     pcmk__g_strcat(str, rsc_id, " ", NULL);
 
     if (interval_ms != 0) {
         pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
                        NULL);
     }
     pcmk__g_strcat(str, crm_action_str(task, interval_ms), " on ", node_name,
                    NULL);
 
     if (status == PCMK_EXEC_DONE) {
         pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
                        NULL);
         if (!pcmk__str_empty(exit_reason)) {
             pcmk__g_strcat(str, " (", exit_reason, ")", NULL);
         }
 
     } else {
         pcmk__g_strcat(str, " could not be executed (",
                        pcmk_exec_status_str(status), NULL);
         if (!pcmk__str_empty(exit_reason)) {
             pcmk__g_strcat(str, ": ", exit_reason, NULL);
         }
         g_string_append_c(str, ')');
      }

      if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change_epoch) == pcmk_ok) {
         char *s = pcmk__epoch2str(&last_change_epoch, 0);
 
         pcmk__g_strcat(str, " at ", s, NULL);
         free(s);
     }
     if (!pcmk__str_empty(exec_time)) {
         int exec_time_ms = 0;
 
         if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok)
             && (exec_time_ms > 0)) {
 
             pcmk__g_strcat(str, " after ",
                            pcmk__readable_interval(exec_time_ms), NULL);
         }
     }
 
     out->list_item(out, NULL, "%s", str->str);
     g_string_free(str, TRUE);
     free(rsc_id);
     free(task);
 }
 
 /*!
  * \internal
  * \brief Display a failed action with technical details
  *
  * \param[in,out] out          Output object to use for display
  * \param[in]     xml_op       XML containing failed action
  * \param[in]     op_key       Operation key of failed action
  * \param[in]     node_name    Where failed action occurred
  * \param[in]     rc           OCF exit code of failed action
  * \param[in]     status       Execution status of failed action
  * \param[in]     exit_reason  Exit reason given for failed action
  * \param[in]     exec_time    String containing execution time in milliseconds
  */
 static void
 failed_action_technical(pcmk__output_t *out, const xmlNode *xml_op,
                         const char *op_key, const char *node_name, int rc,
                         int status, const char *exit_reason,
                         const char *exec_time)
 {
     const char *call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     const char *queue_time = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
     const char *exit_status = services_ocf_exitcode_str(rc);
     const char *lrm_status = pcmk_exec_status_str(status);
     time_t last_change_epoch = 0;
     GString *str = NULL;
 
     if (pcmk__str_empty(op_key)) {
         op_key = "unknown operation";
     }
     if (pcmk__str_empty(exit_status)) {
         exit_status = "unknown exit status";
     }
     if (pcmk__str_empty(call_id)) {
         call_id = "unknown";
     }
 
     str = g_string_sized_new(256);
 
     g_string_append_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'",
                            op_key, node_name, exit_status, rc, call_id,
                            lrm_status);
 
     if (!pcmk__str_empty(exit_reason)) {
         pcmk__g_strcat(str, ", exitreason='", exit_reason, "'", NULL);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change_epoch) == pcmk_ok) {
         char *last_change_str = pcmk__epoch2str(&last_change_epoch, 0);
 
         pcmk__g_strcat(str,
                        ", " XML_RSC_OP_LAST_CHANGE "="
                        "'", last_change_str, "'", NULL);
         free(last_change_str);
     }
     if (!pcmk__str_empty(queue_time)) {
         pcmk__g_strcat(str, ", queued=", queue_time, "ms", NULL);
     }
     if (!pcmk__str_empty(exec_time)) {
         pcmk__g_strcat(str, ", exec=", exec_time, "ms", NULL);
     }
 
     out->list_item(out, NULL, "%s", str->str);
     g_string_free(str, TRUE);
 }
 
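  // Print a failed action, using the detailed technical format if
  // pcmk_show_failed_detail is set and friendlier wording otherwise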
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
 static int
 failed_action_default(pcmk__output_t *out, va_list args)
 {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     const char *node_name = crm_element_value(xml_op, XML_ATTR_UNAME);
     const char *exit_reason = crm_element_value(xml_op,
                                                 XML_LRM_ATTR_EXIT_REASON);
     const char *exec_time = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
 
     int rc;
     int status;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, 0);
 
     if (pcmk__str_empty(op_key)) {
         op_key = ID(xml_op);
     }
     if (pcmk__str_empty(node_name)) {
         node_name = "unknown node";
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_failed_detail)) {
         failed_action_technical(out, xml_op, op_key, node_name, rc, status,
                                 exit_reason, exec_time);
     } else {
         failed_action_friendly(out, xml_op, op_key, node_name, rc, status,
                                exit_reason, exec_time);
     }
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
 static int
 failed_action_xml(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
 
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int rc;
     int status;
     const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
 
     time_t epoch = 0;
     char *rc_s = NULL;
     char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
     xmlNodePtr node = NULL;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, 0);
 
     rc_s = pcmk__itoa(rc);
     node = pcmk__output_create_xml_node(out, "failure",
                                         (op_key == NULL)? "id" : "op_key",
                                         (op_key == NULL)? ID(xml_op) : op_key,
                                         "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                         "exitstatus", services_ocf_exitcode_str(rc),
                                         "exitreason", pcmk__s(reason_s, ""),
                                         "exitcode", rc_s,
                                         "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                         "status", pcmk_exec_status_str(status),
                                         NULL);
     free(rc_s);
 
     if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                  &epoch) == pcmk_ok) && (epoch > 0)) {
         guint interval_ms = 0;
         char *interval_ms_s = NULL;
         char *rc_change = pcmk__epoch2str(&epoch,
                                           crm_time_log_date
                                           |crm_time_log_timeofday
                                           |crm_time_log_with_timezone);
 
         crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
         interval_ms_s = crm_strdup_printf("%u", interval_ms);
 
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change,
                            "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                            "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            "interval", interval_ms_s,
                            "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK),
                            NULL);
 
         free(interval_ms_s);
         free(rc_change);
     }
 
     free(reason_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
                   "GList *", "uint32_t", "bool")
 static int
 failed_action_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     xmlNode *xml_op = NULL;
     int rc = pcmk_rc_no_output;
 
     const char *id = NULL;
 
     if (xmlChildElementCount(data_set->failed) == 0) {
         return rc;
     }
 
     for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
         char *rsc = NULL;
 
         if (!pcmk__str_in_list(crm_element_value(xml_op, XML_ATTR_UNAME), only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         if (pcmk_xe_mask_probe_failure(xml_op)) {
             continue;
         }
 
         id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
         if (!parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL)) {
             continue;
         }
 
         if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) {
             free(rsc);
             continue;
         }
 
         free(rsc);
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
         out->message(out, "failed-action", xml_op, show_opts);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
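  /*!
   * \internal
   * \brief Add HTML spans describing a node's status to an existing element
   *
   * \param[in]     node       Node whose status to describe
   * \param[in,out] parent     HTML element to add spans to
   * \param[in]     show_opts  Group of \c pcmk_show_* flags (only
   *                           \c pcmk_show_feature_set is used here)
   */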
 static void
 status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
 {
     int health = pe__node_health(node);
 
     // Cluster membership
     if (node->details->online) {
         pcmk_create_html_node(parent, "span", NULL, "online", " online");
     } else {
         pcmk_create_html_node(parent, "span", NULL, "offline", " OFFLINE");
     }
 
     // Standby mode
     if (node->details->standby_onfail && (node->details->running_rsc != NULL)) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby due to on-fail,"
                               " with active resources)");
     } else if (node->details->standby_onfail) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby due to on-fail)");
     } else if (node->details->standby && (node->details->running_rsc != NULL)) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby, with active resources)");
     } else if (node->details->standby) {
         pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby)");
     }
 
     // Maintenance mode
     if (node->details->maintenance) {
         pcmk_create_html_node(parent, "span", NULL, "maint",
                               " (in maintenance mode)");
     }
 
     // Node health
     if (health < 0) {
         pcmk_create_html_node(parent, "span", NULL, "health_red",
                               " (health is RED)");
     } else if (health == 0) {
         pcmk_create_html_node(parent, "span", NULL, "health_yellow",
                               " (health is YELLOW)");
     }
 
     // Feature set
     if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
         const char *feature_set = get_node_feature_set(node);
         if (feature_set != NULL) {
             char *buf = crm_strdup_printf(", feature set %s", feature_set);
             pcmk_create_html_node(parent, "span", NULL, NULL, buf);
             free(buf);
         }
     }
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
                   "GList *", "GList *")
 static int
 node_html(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
 
     if (full) {
         xmlNodePtr item_node;
 
         if (pcmk_all_flags_set(show_opts, pcmk_show_brief | pcmk_show_rscs_by_node)) {
             GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
             out->begin_list(out, NULL, NULL, "%s:", node_name);
             item_node = pcmk__output_xml_create_parent(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
             status_node(node, item_node, show_opts);
 
             if (rscs != NULL) {
                 uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
                 out->begin_list(out, NULL, NULL, "Resources");
                 pe__rscs_brief_output(out, rscs, new_show_opts);
                 out->end_list(out);
             }
 
             pcmk__output_xml_pop_parent(out);
             out->end_list(out);
 
         } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             GList *lpc2 = NULL;
             int rc = pcmk_rc_no_output;
 
             out->begin_list(out, NULL, NULL, "%s:", node_name);
             item_node = pcmk__output_xml_create_parent(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
             status_node(node, item_node, show_opts);
 
             for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
                 PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
 
                 show_opts |= pcmk_show_rsc_only;
                 out->message(out, crm_map_element_name(rsc->xml), show_opts,
                              rsc, only_node, only_rsc);
             }
 
             PCMK__OUTPUT_LIST_FOOTER(out, rc);
             pcmk__output_xml_pop_parent(out);
             out->end_list(out);
 
         } else {
             char *buf = crm_strdup_printf("%s:", node_name);
 
             item_node = pcmk__output_create_xml_node(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
             status_node(node, item_node, show_opts);
 
             free(buf);
         }
     } else {
         out->begin_list(out, NULL, NULL, "%s:", node_name);
     }
 
     free(node_name);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Get a human-friendly textual description of a node's status
  *
  * \param[in] node  Node to check
  *
  * \return String representation of node's status
  */
 static const char *
 node_text_status(const pe_node_t *node)
 {
     if (node->details->unclean) {
         if (node->details->online) {
             return "UNCLEAN (online)";
 
         } else if (node->details->pending) {
             return "UNCLEAN (pending)";
 
         } else {
             return "UNCLEAN (offline)";
         }
 
     } else if (node->details->pending) {
         return "pending";
 
     } else if (node->details->standby_onfail && node->details->online) {
         return "standby (on-fail)";
 
     } else if (node->details->standby) {
         if (node->details->online) {
             if (node->details->running_rsc) {
                 return "standby (with active resources)";
             } else {
                 return "standby";
             }
         } else {
             return "OFFLINE (standby)";
         }
 
     } else if (node->details->maintenance) {
         if (node->details->online) {
             return "maintenance";
         } else {
             return "OFFLINE (maintenance)";
         }
 
     } else if (node->details->online) {
         return "online";
     }
 
     return "OFFLINE";
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
 static int
 node_text(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
         GString *str = g_string_sized_new(64);
         int health = pe__node_health(node);
 
         // Create a summary line with node type, name, and status
         if (pe__is_guest_node(node)) {
             g_string_append(str, "GuestNode");
         } else if (pe__is_remote_node(node)) {
             g_string_append(str, "RemoteNode");
         } else {
             g_string_append(str, "Node");
         }
         pcmk__g_strcat(str, " ", node_name, ": ", node_text_status(node), NULL);
 
         if (health < 0) {
             g_string_append(str, " (health is RED)");
         } else if (health == 0) {
             g_string_append(str, " (health is YELLOW)");
         }
         if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
             const char *feature_set = get_node_feature_set(node);
             if (feature_set != NULL) {
                 pcmk__g_strcat(str, ", feature set ", feature_set, NULL);
             }
         }
 
         /* If we're grouping by node, print its resources */
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             if (pcmk_is_set(show_opts, pcmk_show_brief)) {
                 GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
                 if (rscs != NULL) {
                     uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
                     out->begin_list(out, NULL, NULL, "%s", str->str);
                     out->begin_list(out, NULL, NULL, "Resources");
 
                     pe__rscs_brief_output(out, rscs, new_show_opts);
 
                     out->end_list(out);
                     out->end_list(out);
 
                     g_list_free(rscs);
                 }
 
             } else {
                 GList *gIter2 = NULL;
 
                 out->begin_list(out, NULL, NULL, "%s", str->str);
                 out->begin_list(out, NULL, NULL, "Resources");
 
                 for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
                     pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
                     show_opts |= pcmk_show_rsc_only;
                     out->message(out, crm_map_element_name(rsc->xml), show_opts,
                                  rsc, only_node, only_rsc);
                 }
 
                 out->end_list(out);
                 out->end_list(out);
             }
         } else {
             out->list_item(out, NULL, "%s", str->str);
         }
 
         g_string_free(str, TRUE);
         free(node_name);
     } else {
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
         out->begin_list(out, NULL, NULL, "Node: %s", node_name);
         free(node_name);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
 static int
 node_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
      uint32_t show_opts = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         const char *node_type = "unknown";
         char *length_s = pcmk__itoa(g_list_length(node->details->running_rsc));
         int health = pe__node_health(node);
         const char *health_s = NULL;
         const char *feature_set;
 
         switch (node->details->type) {
             case node_member:
                 node_type = "member";
                 break;
             case node_remote:
                 node_type = "remote";
                 break;
             case node_ping:
                 node_type = "ping";
                 break;
         }
 
         if (health < 0) {
             health_s = "red";
         } else if (health == 0) {
             health_s = "yellow";
         } else {
             health_s = "green";
         }
 
         feature_set = get_node_feature_set(node);
 
         pe__name_and_nvpairs_xml(out, true, "node", 15,
                                  "name", node->details->uname,
                                  "id", node->details->id,
                                  "online", pcmk__btoa(node->details->online),
                                  "standby", pcmk__btoa(node->details->standby),
                                  "standby_onfail", pcmk__btoa(node->details->standby_onfail),
                                  "maintenance", pcmk__btoa(node->details->maintenance),
                                  "pending", pcmk__btoa(node->details->pending),
                                  "unclean", pcmk__btoa(node->details->unclean),
                                  "health", health_s,
                                  "feature_set", feature_set,
                                  "shutdown", pcmk__btoa(node->details->shutdown),
                                  "expected_up", pcmk__btoa(node->details->expected_up),
                                  "is_dc", pcmk__btoa(node->details->is_dc),
                                  "resources_running", length_s,
                                  "type", node_type);
 
         if (pe__is_guest_node(node)) {
             xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
             crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id);
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             GList *lpc = NULL;
 
             for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
                 show_opts |= pcmk_show_rsc_only;
                 out->message(out, crm_map_element_name(rsc->xml), show_opts,
                              rsc, only_node, only_rsc);
             }
         }
 
         free(length_s);
 
         out->end_list(out);
     } else {
         pcmk__output_xml_create_parent(out, "node",
                                        "name", node->details->uname,
                                        NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_text(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     if (add_extra) {
         int v;
 
         if (value == NULL) {
             v = 0;
         } else {
             pcmk__scan_min_int(value, &v, INT_MIN);
         }
         if (v <= 0) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
         } else if (v < expected_score) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
         } else {
             out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
         }
     } else {
         out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_html(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     if (add_extra) {
         int v;
         char *s = crm_strdup_printf("%s: %s", name, value);
         xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
 
         if (value == NULL) {
             v = 0;
         } else {
             pcmk__scan_min_int(value, &v, INT_MIN);
         }
 
         pcmk_create_html_node(item_node, "span", NULL, NULL, s);
         free(s);
 
         if (v <= 0) {
             pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
         } else if (v < expected_score) {
              char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d)", expected_score);
             pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
             free(buf);
         }
     } else {
         out->list_item(out, NULL, "%s: %s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
 static int
 node_and_op(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     gchar *node_str = NULL;
     char *last_change_str = NULL;
 
     const char *op_rsc = crm_element_value(xml_op, "resource");
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int status;
     time_t last_change = 0;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, PCMK_EXEC_UNKNOWN);
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         const pe_node_t *node = pe__current_node(rsc);
         const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
         uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
 
         if (node == NULL) {
             node = rsc->pending_node;
         }
 
         node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
                                               show_opts, target_role, false);
     } else {
         node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         last_change_str = crm_strdup_printf(", %s='%s', exec=%sms",
                                             XML_RSC_OP_LAST_CHANGE,
                                             pcmk__trim(ctime(&last_change)),
                                             crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
     }
 
     out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
                    node_str, op_key ? op_key : ID(xml_op),
                    crm_element_value(xml_op, XML_ATTR_UNAME),
                    crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                    crm_element_value(xml_op, XML_LRM_ATTR_RC),
                    last_change_str ? last_change_str : "",
                    pcmk_exec_status_str(status));
 
     g_free(node_str);
     free(last_change_str);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
 static int
 node_and_op_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     const char *op_rsc = crm_element_value(xml_op, "resource");
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int status;
     time_t last_change = 0;
     xmlNode *node = NULL;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, PCMK_EXEC_UNKNOWN);
     node = pcmk__output_create_xml_node(out, "operation",
                                         "op", op_key ? op_key : ID(xml_op),
                                         "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                         "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                         "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
                                         "status", pcmk_exec_status_str(status),
                                         NULL);
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
         char *agent_tuple = NULL;
 
         agent_tuple = crm_strdup_printf("%s:%s:%s", class,
                                         pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "",
                                         kind);
 
         pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc),
                            "agent", agent_tuple,
                            NULL);
         free(agent_tuple);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE,
                            pcmk__trim(ctime(&last_change)),
                            XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_xml(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute",
                                                    "name", name,
                                                    "value", value,
                                                    NULL);
 
     if (add_extra) {
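         // Also record the score this attribute is expected to have (used for ping connectivity)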
         char *buf = pcmk__itoa(expected_score);
         crm_xml_add(node, "expected", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
                   "bool", "GList *", "GList *")
 static int
 node_attribute_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     int rc = pcmk_rc_no_output;
 
     /* Display each node's attributes */
     for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = gIter->data;
 
         GList *attr_list = NULL;
         GHashTableIter iter;
         gpointer key;
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         g_hash_table_iter_init(&iter, node->details->attrs);
         while (g_hash_table_iter_next (&iter, &key, NULL)) {
             attr_list = filter_attr_list(attr_list, key);
         }
 
         if (attr_list == NULL) {
             continue;
         }
 
         if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) {
             g_list_free(attr_list);
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
 
         out->message(out, "node", node, show_opts, false, only_node, only_rsc);
 
         for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) {
             const char *name = aIter->data;
             const char *value = NULL;
             int expected_score = 0;
             bool add_extra = false;
 
             value = pe_node_attribute_raw(node, name);
 
             add_extra = add_extra_info(node, node->details->running_rsc,
                                        data_set, name, &expected_score);
 
             /* Print attribute name and value */
             out->message(out, "node-attribute", name, value, add_extra,
                          expected_score);
         }
 
         g_list_free(attr_list);
         out->end_list(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
 static int
 node_capacity(pcmk__output_t *out, va_list args)
 {
     const pe_node_t *node = va_arg(args, pe_node_t *);
     const char *comment = va_arg(args, const char *);
 
     char *dump_text = crm_strdup_printf("%s: %s capacity:",
                                         comment, pe__node_name(node));
 
     g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
     out->list_item(out, NULL, "%s", dump_text);
     free(dump_text);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
 static int
 node_capacity_xml(pcmk__output_t *out, va_list args)
 {
     const pe_node_t *node = va_arg(args, pe_node_t *);
     const char *comment = va_arg(args, const char *);
 
     xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
                                                        "node", node->details->uname,
                                                        "comment", comment,
                                                        NULL);
     g_hash_table_foreach(node->details->utilization, add_dump_node, xml_node);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
                   "GList *", "GList *", "uint32_t", "uint32_t")
 static int
 node_history_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     xmlNode *node_state = va_arg(args, xmlNode *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     xmlNode *lrm_rsc = NULL;
     xmlNode *rsc_entry = NULL;
     int rc = pcmk_rc_no_output;
 
     lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
     lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
 
     /* Print history of each of the node's resources */
     for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
          rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
         const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
         pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
         const pe_resource_t *parent = pe__const_top_resource(rsc, false);
 
         /* We can't use is_filtered here to filter group resources.  For is_filtered,
          * we have to decide whether to check the parent or not.  If we check the
          * parent, all elements of a group will always be printed because that's how
          * is_filtered works for groups.  If we do not check the parent, sometimes
          * this will filter everything out.
          *
          * For other resource types, is_filtered is okay.
          */
         if (parent->variant == pe_group) {
             if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
                                    pcmk__str_star_matches)
                 && !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
                                       pcmk__str_star_matches)) {
                 continue;
             }
         } else {
             if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
                 continue;
             }
         }
 
         if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
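             // Operations were not requested, so show only resources with a nonzero fail count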
             time_t last_failure = 0;
             int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
                                              NULL);
 
             if (failcount <= 0) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, show_opts, false, only_node,
                              only_rsc);
             }
 
             out->message(out, "resource-history", rsc, rsc_id, false,
                          failcount, last_failure, false);
         } else {
             GList *op_list = get_operation_list(rsc_entry);
             pe_resource_t *rsc = pe_find_resource(data_set->resources,
                                                   crm_element_value(rsc_entry, XML_ATTR_ID));
 
             if (op_list == NULL) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, show_opts, false, only_node,
                              only_rsc);
             }
 
             out->message(out, "resource-operation-list", data_set, rsc, node,
                          op_list, show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_html(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
 
     int rc = pcmk_rc_no_output;
 
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List");
 
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_text(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     /* space-separated lists of node names */
     GString *online_nodes = NULL;
     GString *online_remote_nodes = NULL;
     GString *online_guest_nodes = NULL;
     GString *offline_nodes = NULL;
     GString *offline_remote_nodes = NULL;
 
     int rc = pcmk_rc_no_output;
 
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             free(node_name);
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List");
 
         // Determine whether to display node individually or in a list
         if (node->details->unclean || node->details->pending
             || (node->details->standby_onfail && node->details->online)
             || node->details->standby || node->details->maintenance
             || pcmk_is_set(show_opts, pcmk_show_rscs_by_node)
             || pcmk_is_set(show_opts, pcmk_show_feature_set)
             || (pe__node_health(node) <= 0)) {
             // Display node individually
 
         } else if (node->details->online) {
             // Display online node in a list
             if (pe__is_guest_node(node)) {
                 pcmk__add_word(&online_guest_nodes, 1024, node_name);
 
             } else if (pe__is_remote_node(node)) {
                 pcmk__add_word(&online_remote_nodes, 1024, node_name);
 
             } else {
                 pcmk__add_word(&online_nodes, 1024, node_name);
             }
             free(node_name);
             continue;
 
         } else {
             // Display offline node in a list
             if (pe__is_remote_node(node)) {
                 pcmk__add_word(&offline_remote_nodes, 1024, node_name);
 
             } else if (pe__is_guest_node(node)) {
                 /* ignore offline guest nodes */
 
             } else {
                 pcmk__add_word(&offline_nodes, 1024, node_name);
             }
             free(node_name);
             continue;
         }
 
         /* If we get here, node is in bad state, or we're grouping by node */
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
         free(node_name);
     }
 
     /* If we're not grouping by node, summarize nodes by status */
     if (online_nodes != NULL) {
         out->list_item(out, "Online", "[ %s ]",
                        (const char *) online_nodes->str);
         g_string_free(online_nodes, TRUE);
     }
     if (offline_nodes != NULL) {
         out->list_item(out, "OFFLINE", "[ %s ]",
                        (const char *) offline_nodes->str);
         g_string_free(offline_nodes, TRUE);
     }
     if (online_remote_nodes) {
         out->list_item(out, "RemoteOnline", "[ %s ]",
                        (const char *) online_remote_nodes->str);
         g_string_free(online_remote_nodes, TRUE);
     }
     if (offline_remote_nodes) {
         out->list_item(out, "RemoteOFFLINE", "[ %s ]",
                        (const char *) offline_remote_nodes->str);
         g_string_free(offline_remote_nodes, TRUE);
     }
     if (online_guest_nodes != NULL) {
         out->list_item(out, "GuestOnline", "[ %s ]",
                        (const char *) online_guest_nodes->str);
         g_string_free(online_guest_nodes, TRUE);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_xml(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
 
     out->begin_list(out, NULL, NULL, "nodes");
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
     }
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
                   "uint32_t", "uint32_t", "bool")
 static int
 node_summary(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     xmlNode *node_state = NULL;
     xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
                                                 XML_CIB_TAG_STATUS);
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(cib_status) == 0) {
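         // Nothing to show if the CIB status section has no node entries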
         return rc;
     }
 
     for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
          node_state != NULL; node_state = crm_next_same_xml(node_state)) {
         pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
                                  pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
 
         out->message(out, "node-history-list", data_set, node, node_state,
                      only_node, only_rsc, section_opts, show_opts);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
                   "const char *", "const char *")
 static int
 node_weight(pcmk__output_t *out, va_list args)
 {
     const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
     const char *prefix = va_arg(args, const char *);
     const char *uname = va_arg(args, const char *);
     const char *score = va_arg(args, const char *);
 
     if (rsc) {
         out->list_item(out, NULL, "%s: %s allocation score on %s: %s",
                        prefix, rsc->id, uname, score);
     } else {
         out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
                   "const char *", "const char *")
 static int
 node_weight_xml(pcmk__output_t *out, va_list args)
 {
     const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
     const char *prefix = va_arg(args, const char *);
     const char *uname = va_arg(args, const char *);
     const char *score = va_arg(args, const char *);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "node_weight",
                                                    "function", prefix,
                                                    "node", uname,
                                                    "score", score,
                                                    NULL);
 
     if (rsc) {
         crm_xml_add(node, "id", rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
 static int
 op_history_text(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *buf = op_history_string(xml_op, task, interval_ms_s, rc,
                                   pcmk_is_set(show_opts, pcmk_show_timing));
 
     out->list_item(out, NULL, "%s", buf);
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
 static int
 op_history_xml(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *rc_s = pcmk__itoa(rc);
     xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history",
                                                    "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                                    "task", task,
                                                    "rc", rc_s,
                                                    "rc_text", services_ocf_exitcode_str(rc),
                                                    NULL);
     free(rc_s);
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *s = crm_strdup_printf("%sms", interval_ms_s);
         crm_xml_add(node, "interval", s);
         free(s);
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_timing)) {
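         // Add the last-run timestamp and execution/queue times recorded for this operation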
         const char *value = NULL;
         time_t epoch = 0;
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                      &epoch) == pcmk_ok) && (epoch > 0)) {
             char *s = pcmk__epoch2str(&epoch, 0);
             crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, s);
             free(s);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_EXEC, s);
             free(s);
         }
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_QUEUE, s);
             free(s);
         }
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 promotion_score(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
     pe_node_t *chosen = va_arg(args, pe_node_t *);
     const char *score = va_arg(args, const char *);
 
     out->list_item(out, NULL, "%s promotion score on %s: %s",
                    child_rsc->id,
                    chosen? chosen->details->uname : "none",
                    score);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 promotion_score_xml(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
     pe_node_t *chosen = va_arg(args, pe_node_t *);
     const char *score = va_arg(args, const char *);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
                                                    "id", child_rsc->id,
                                                    "score", score,
                                                    NULL);
 
     if (chosen) {
         crm_xml_add(node, "node", chosen->details->uname);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
 static int
 resource_config(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     bool raw = va_arg(args, int);
 
     char *rsc_xml = formatted_xml_buf(rsc, raw);
 
     out->output_xml(out, "xml", rsc_xml);
 
     free(rsc_xml);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
 static int
 resource_config_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     bool raw = va_arg(args, int);
 
     char *rsc_xml = formatted_xml_buf(rsc, raw);
 
     pcmk__formatted_printf(out, "Resource XML:\n");
     out->output_xml(out, "xml", rsc_xml);
 
     free(rsc_xml);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
 static int
 resource_history_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     bool all = va_arg(args, int);
     int failcount = va_arg(args, int);
     time_t last_failure = va_arg(args, time_t);
     bool as_header = va_arg(args, int);
 
     char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
 
     if (as_header) {
         out->begin_list(out, NULL, NULL, "%s", buf);
     } else {
         out->list_item(out, NULL, "%s", buf);
     }
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
 static int
 resource_history_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     bool all = va_arg(args, int);
     int failcount = va_arg(args, int);
     time_t last_failure = va_arg(args, time_t);
     bool as_header = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history",
                                                      "id", rsc_id,
                                                      NULL);
 
     if (rsc == NULL) {
         pcmk__xe_set_bool_attr(node, "orphan", true);
     } else if (all || failcount || last_failure > 0) {
         char *migration_s = pcmk__itoa(rsc->migration_threshold);
 
         pcmk__xe_set_props(node, "orphan", "false",
                            "migration-threshold", migration_s,
                            NULL);
         free(migration_s);
 
         if (failcount > 0) {
             char *s = pcmk__itoa(failcount);
 
             crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s);
             free(s);
         }
 
         if (last_failure > 0) {
             char *s = pcmk__epoch2str(&last_failure, 0);
 
             crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, s);
             free(s);
         }
     }
 
     if (!as_header) {
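         // Not being used as a section header, so close the resource_history element now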
         pcmk__output_xml_pop_parent(out);
     }
 
     return pcmk_rc_ok;
 }
 
 static void
 print_resource_header(pcmk__output_t *out, uint32_t show_opts)
 {
     if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
         /* Active resources have already been printed by node */
         out->begin_list(out, NULL, NULL, "Inactive Resources");
     } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         out->begin_list(out, NULL, NULL, "Full List of Resources");
     } else {
         out->begin_list(out, NULL, NULL, "Active Resources");
     }
 }
 
 
 PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
                   "GList *", "GList *", "bool")
 static int
 resource_list(pcmk__output_t *out, va_list args)
 {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_summary = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     bool print_spacer = va_arg(args, int);
 
     GList *rsc_iter;
     int rc = pcmk_rc_no_output;
     bool printed_header = false;
 
     /* If we already showed active resources by node, and
      * we're not showing inactive resources, we have nothing to do
      */
     if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node) &&
         !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         return rc;
     }
 
     /* If we haven't already printed resources grouped by node,
      * and brief output was requested, print resource summary */
     if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
         GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
 
         PCMK__OUTPUT_SPACER_IF(out, print_spacer);
         print_resource_header(out, show_opts);
         printed_header = true;
 
         rc = pe__rscs_brief_output(out, rscs, show_opts);
         g_list_free(rscs);
     }
 
     /* For each resource, display it if appropriate */
     for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
         int x;
 
         /* Complex resources may have some sub-resources active and some inactive */
         gboolean is_active = rsc->fns->active(rsc, TRUE);
         gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
         /* Skip inactive orphans (deleted but still in CIB) */
         if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
             continue;
 
         /* Skip active resources if we already displayed them by node */
         } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             if (is_active) {
                 continue;
             }
 
         /* Skip primitives already counted in a brief summary */
         } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
             continue;
 
         /* Skip resources that aren't at least partially active,
          * unless we're displaying inactive resources
          */
         } else if (!partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
             continue;
 
         } else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) {
             continue;
         }
 
         if (!printed_header) {
             PCMK__OUTPUT_SPACER_IF(out, print_spacer);
             print_resource_header(out, show_opts);
             printed_header = true;
         }
 
         /* Print this resource */
         x = out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc,
                          only_node, only_rsc);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     if (print_summary && rc != pcmk_rc_ok) {
         if (!printed_header) {
             PCMK__OUTPUT_SPACER_IF(out, print_spacer);
             print_resource_header(out, show_opts);
             printed_header = true;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             out->list_item(out, NULL, "No inactive resources");
         } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
             out->list_item(out, NULL, "No resources");
         } else {
             out->list_item(out, NULL, "No active resources");
         }
     }
 
     if (printed_header) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
                   "pe_node_t *", "GList *", "uint32_t")
 static int
 resource_operation_list(pcmk__output_t *out, va_list args)
 {
     pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     GList *op_list = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     GList *gIter = NULL;
     int rc = pcmk_rc_no_output;
 
     /* Print each operation */
     for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *xml_op = (xmlNode *) gIter->data;
         const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(xml_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
         int op_rc_i;
 
         pcmk__scan_min_int(op_rc, &op_rc_i, 0);
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* If this is the first printed operation, print heading for resource */
         if (rc == pcmk_rc_no_output) {
             time_t last_failure = 0;
             int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
                                              NULL);
 
             out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
                          failcount, last_failure, true);
             rc = pcmk_rc_ok;
         }
 
         /* Print the operation */
         out->message(out, "op-history", xml_op, task, interval_ms_s,
                      op_rc_i, show_opts);
     }
 
     /* Free the list we created (no need to free the individual items) */
     g_list_free(op_list);
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 resource_util(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     const char *fn = va_arg(args, const char *);
 
     char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
                                         fn, rsc->id, pe__node_name(node));
 
     g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
     out->list_item(out, NULL, "%s", dump_text);
     free(dump_text);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 resource_util_xml(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     const char *fn = va_arg(args, const char *);
 
     xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
                                                        "resource", rsc->id,
                                                        "node", node->details->uname,
                                                        "function", fn,
                                                        NULL);
     g_hash_table_foreach(rsc->utilization, add_dump_node, xml_node);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_html(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
 
         out->list_item(out, NULL, "%s:\t%s%s %s=\"%s\"", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        "last-granted", pcmk__s(epoch_str, ""));
         free(epoch_str);
     } else {
         out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_text(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
 
         out->list_item(out, ticket->id, "%s%s %s=\"%s\"",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        "last-granted", pcmk__s(epoch_str, ""));
         free(epoch_str);
     } else {
         out->list_item(out, ticket->id, "%s%s",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_xml(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     xmlNodePtr node = NULL;
 
     node = pcmk__output_create_xml_node(out, "ticket",
                                         "id", ticket->id,
                                         "status", ticket->granted ? "granted" : "revoked",
                                         "standby", pcmk__btoa(ticket->standby),
                                         NULL);
 
     if (ticket->last_granted > -1) {
         char *buf = pcmk__epoch2str(&ticket->last_granted, 0);
 
         crm_xml_add(node, "last-granted", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
 static int
 ticket_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     bool print_spacer = va_arg(args, int);
 
     GHashTableIter iter;
     gpointer key, value;
 
     if (g_hash_table_size(data_set->tickets) == 0) {
         return pcmk_rc_no_output;
     }
 
     PCMK__OUTPUT_SPACER_IF(out, print_spacer);
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "Tickets");
 
     /* Print each ticket */
     g_hash_table_iter_init(&iter, data_set->tickets);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         pe_ticket_t *ticket = (pe_ticket_t *) value;
         out->message(out, "ticket", ticket);
     }
 
     /* Close section */
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "ban", "default", ban_text },
     { "ban", "html", ban_html },
     { "ban", "xml", ban_xml },
     { "ban-list", "default", ban_list },
     { "bundle", "default", pe__bundle_text },
     { "bundle", "xml",  pe__bundle_xml },
     { "bundle", "html",  pe__bundle_html },
     { "clone", "default", pe__clone_default },
     { "clone", "xml",  pe__clone_xml },
     { "cluster-counts", "default", cluster_counts_text },
     { "cluster-counts", "html", cluster_counts_html },
     { "cluster-counts", "xml", cluster_counts_xml },
     { "cluster-dc", "default", cluster_dc_text },
     { "cluster-dc", "html", cluster_dc_html },
     { "cluster-dc", "xml", cluster_dc_xml },
     { "cluster-options", "default", cluster_options_text },
     { "cluster-options", "html", cluster_options_html },
     { "cluster-options", "log", cluster_options_log },
     { "cluster-options", "xml", cluster_options_xml },
     { "cluster-summary", "default", cluster_summary },
     { "cluster-summary", "html", cluster_summary_html },
     { "cluster-stack", "default", cluster_stack_text },
     { "cluster-stack", "html", cluster_stack_html },
     { "cluster-stack", "xml", cluster_stack_xml },
     { "cluster-times", "default", cluster_times_text },
     { "cluster-times", "html", cluster_times_html },
     { "cluster-times", "xml", cluster_times_xml },
     { "failed-action", "default", failed_action_default },
     { "failed-action", "xml", failed_action_xml },
     { "failed-action-list", "default", failed_action_list },
     { "group", "default",  pe__group_default},
     { "group", "xml",  pe__group_xml },
     { "maint-mode", "text", cluster_maint_mode_text },
     { "node", "default", node_text },
     { "node", "html", node_html },
     { "node", "xml", node_xml },
     { "node-and-op", "default", node_and_op },
     { "node-and-op", "xml", node_and_op_xml },
     { "node-capacity", "default", node_capacity },
     { "node-capacity", "xml", node_capacity_xml },
     { "node-history-list", "default", node_history_list },
     { "node-list", "default", node_list_text },
     { "node-list", "html", node_list_html },
     { "node-list", "xml", node_list_xml },
     { "node-weight", "default", node_weight },
     { "node-weight", "xml", node_weight_xml },
     { "node-attribute", "default", node_attribute_text },
     { "node-attribute", "html", node_attribute_html },
     { "node-attribute", "xml", node_attribute_xml },
     { "node-attribute-list", "default", node_attribute_list },
     { "node-summary", "default", node_summary },
     { "op-history", "default", op_history_text },
     { "op-history", "xml", op_history_xml },
     { "primitive", "default",  pe__resource_text },
     { "primitive", "xml",  pe__resource_xml },
     { "primitive", "html",  pe__resource_html },
     { "promotion-score", "default", promotion_score },
     { "promotion-score", "xml", promotion_score_xml },
     { "resource-config", "default", resource_config },
     { "resource-config", "text", resource_config_text },
     { "resource-history", "default", resource_history_text },
     { "resource-history", "xml", resource_history_xml },
     { "resource-list", "default", resource_list },
     { "resource-operation-list", "default", resource_operation_list },
     { "resource-util", "default", resource_util },
     { "resource-util", "xml", resource_util_xml },
     { "ticket", "default", ticket_text },
     { "ticket", "html", ticket_html },
     { "ticket", "xml", ticket_xml },
     { "ticket-list", "default", ticket_list },
 
     { NULL, NULL, NULL }
 };
 
 void
 pe__register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 7065dacdb5..0d356a3211 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,2180 +1,2190 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 
 #include <stdint.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <signal.h>
 #include <sys/utsname.h>
 
 #include <crm/msg_xml.h>
 #include <crm/services.h>
 #include <crm/lrmd.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/internal.h>  // pcmk__ends_with_ext()
 #include <crm/common/ipc.h>
 #include <crm/common/mainloop.h>
 #include <crm/common/output.h>
 #include <crm/common/output_internal.h>
 #include <crm/common/results.h>
 #include <crm/common/util.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 
 #include "crm_mon.h"
 
 #define SUMMARY "Provides a summary of cluster's current state.\n\n" \
                 "Outputs varying levels of detail in a number of different formats."
 
 /*
  * Definitions indicating which items to print
  */
 
 static uint32_t show;
 static uint32_t show_opts = pcmk_show_pending;
 
 /*
  * Definitions indicating how to output
  */
 
 static mon_output_format_t output_format = mon_output_unset;
 
 /* other globals */
 static GIOChannel *io_channel = NULL;
 static GMainLoop *mainloop = NULL;
 static guint reconnect_timer = 0;
 static mainloop_timer_t *refresh_timer = NULL;
 
 static enum pcmk_pacemakerd_state pcmkd_state = pcmk_pacemakerd_state_invalid;
 static cib_t *cib = NULL;
 static stonith_t *st = NULL;
 static xmlNode *current_cib = NULL;
 
 static GError *error = NULL;
 static pcmk__common_args_t *args = NULL;
 static pcmk__output_t *out = NULL;
 static GOptionContext *context = NULL;
 static gchar **processed_args = NULL;
 
 static time_t last_refresh = 0;
 volatile crm_trigger_t *refresh_trigger = NULL;
 
 static enum pcmk__fence_history fence_history = pcmk__fence_history_none;
 
 int interactive_fence_level = 0;
 
 static pcmk__supported_format_t formats[] = {
 #if CURSES_ENABLED
     CRM_MON_SUPPORTED_FORMAT_CURSES,
 #endif
     PCMK__SUPPORTED_FORMAT_HTML,
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *", "int")
 static int
 crm_mon_disconnected_default(pcmk__output_t *out, va_list args)
 {
     return pcmk_rc_no_output;
 }
 
 PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *", "int")
 static int
 crm_mon_disconnected_html(pcmk__output_t *out, va_list args)
 {
     const char *desc = va_arg(args, const char *);
     enum pcmk_pacemakerd_state state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     if (out->dest != stdout) {
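         // Discard any partially built output before reporting the disconnect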
         out->reset(out);
     }
 
     pcmk__output_create_xml_text_node(out, "span", "Not connected to CIB");
 
     if (desc != NULL) {
         pcmk__output_create_xml_text_node(out, "span", ": ");
         pcmk__output_create_xml_text_node(out, "span", desc);
     }
 
     if (state != pcmk_pacemakerd_state_invalid) {
         const char *state_s = pcmk__pcmkd_state_enum2friendly(state);
 
         pcmk__output_create_xml_text_node(out, "span", " (");
         pcmk__output_create_xml_text_node(out, "span", state_s);
         pcmk__output_create_xml_text_node(out, "span", ")");
     }
 
     out->finish(out, CRM_EX_DISCONNECT, true, NULL);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *", "int")
 static int
 crm_mon_disconnected_text(pcmk__output_t *out, va_list args)
 {
     const char *desc = va_arg(args, const char *);
     enum pcmk_pacemakerd_state state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     int rc = pcmk_rc_ok;
 
     if (out->dest != stdout) {
         out->reset(out);
     }
 
     if (state != pcmk_pacemakerd_state_invalid) {
         rc = out->info(out, "Not connected to CIB%s%s (%s)",
                        (desc != NULL)? ": " : "", pcmk__s(desc, ""),
                        pcmk__pcmkd_state_enum2friendly(state));
     } else {
         rc = out->info(out, "Not connected to CIB%s%s",
                        (desc != NULL)? ": " : "", pcmk__s(desc, ""));
     }
 
     out->finish(out, CRM_EX_DISCONNECT, true, NULL);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *", "int")
 static int
 crm_mon_disconnected_xml(pcmk__output_t *out, va_list args)
 {
     const char *desc = va_arg(args, const char *);
     enum pcmk_pacemakerd_state state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     const char *state_s = NULL;
 
     if (out->dest != stdout) {
         out->reset(out);
     }
 
     if (state != pcmk_pacemakerd_state_invalid) {
         state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state);
     }
 
     pcmk__output_create_xml_node(out, "crm-mon-disconnected",
                                  XML_ATTR_DESC, desc,
                                  "pacemakerd-state", state_s,
                                  NULL);
 
     out->finish(out, CRM_EX_DISCONNECT, true, NULL);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "crm-mon-disconnected", "default", crm_mon_disconnected_default },
     { "crm-mon-disconnected", "html", crm_mon_disconnected_html },
     { "crm-mon-disconnected", "text", crm_mon_disconnected_text },
     { "crm-mon-disconnected", "xml", crm_mon_disconnected_xml },
     { NULL, NULL, NULL },
 };
 
 /* Define exit codes for monitoring-compatible output
  * For nagios plugins, the possibilities are
  * OK=0, WARN=1, CRIT=2, and UNKNOWN=3
  */
 #define MON_STATUS_WARN    CRM_EX_ERROR
 #define MON_STATUS_CRIT    CRM_EX_INVALID_PARAM
 #define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE
 
 #define RECONNECT_MSECS 5000
 
 struct {
     guint reconnect_ms;
     gboolean daemonize;
     gboolean fence_connect;
     gboolean one_shot;
     gboolean print_pending;
     gboolean show_bans;
     gboolean watch_fencing;
     char *pid_file;
     char *external_agent;
     char *external_recipient;
     char *neg_location_prefix;
     char *only_node;
     char *only_rsc;
     GSList *user_includes_excludes;
     GSList *includes_excludes;
 } options = {
     .fence_connect = TRUE,
     .reconnect_ms = RECONNECT_MSECS
 };
 
 static crm_exit_t clean_up(crm_exit_t exit_code);
 static void crm_diff_update(const char *event, xmlNode * msg);
 static void clean_up_on_connection_failure(int rc);
 static int mon_refresh_display(gpointer user_data);
 static int setup_cib_connection(void);
 static int setup_fencer_connection(void);
 static int setup_api_connections(void);
 static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
 static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
 static void refresh_after_event(gboolean data_updated, gboolean enforce);
 
 static uint32_t
 all_includes(mon_output_format_t fmt) {
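     // The monitor, plain, and console formats support every section except cluster options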
     if (fmt == mon_output_monitor || fmt == mon_output_plain || fmt == mon_output_console) {
         return ~pcmk_section_options;
     } else {
         return pcmk_section_all;
     }
 }
 
 static uint32_t
 default_includes(mon_output_format_t fmt) {
     switch (fmt) {
         case mon_output_monitor:
         case mon_output_plain:
         case mon_output_console:
         case mon_output_html:
         case mon_output_cgi:
             return pcmk_section_summary
                    |pcmk_section_nodes
                    |pcmk_section_resources
                    |pcmk_section_failures;
 
         case mon_output_xml:
         case mon_output_legacy_xml:
             return all_includes(fmt);
 
         default:
             return 0;
     }
 }
 
 struct {
     const char *name;
     uint32_t bit;
 } sections[] = {
     { "attributes", pcmk_section_attributes },
     { "bans", pcmk_section_bans },
     { "counts", pcmk_section_counts },
     { "dc", pcmk_section_dc },
     { "failcounts", pcmk_section_failcounts },
     { "failures", pcmk_section_failures },
     { PCMK__VALUE_FENCING, pcmk_section_fencing_all },
     { "fencing-failed", pcmk_section_fence_failed },
     { "fencing-pending", pcmk_section_fence_pending },
     { "fencing-succeeded", pcmk_section_fence_worked },
     { "maint-mode", pcmk_section_maint_mode },
     { "nodes", pcmk_section_nodes },
     { "operations", pcmk_section_operations },
     { "options", pcmk_section_options },
     { "resources", pcmk_section_resources },
     { "stack", pcmk_section_stack },
     { "summary", pcmk_section_summary },
     { "tickets", pcmk_section_tickets },
     { "times", pcmk_section_times },
     { NULL }
 };
 
 static uint32_t
 find_section_bit(const char *name) {
     for (int i = 0; sections[i].name != NULL; i++) {
         if (pcmk__str_eq(sections[i].name, name, pcmk__str_casei)) {
             return sections[i].bit;
         }
     }
 
     return 0;
 }
 
 static gboolean
 apply_exclude(const gchar *excludes, GError **error) {
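     // Process a comma-separated list of section names to exclude from display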
     char **parts = NULL;
     gboolean result = TRUE;
 
     parts = g_strsplit(excludes, ",", 0);
     for (char **s = parts; *s != NULL; s++) {
         uint32_t bit = find_section_bit(*s);
 
         if (pcmk__str_eq(*s, "all", pcmk__str_none)) {
             show = 0;
         } else if (pcmk__str_eq(*s, PCMK__VALUE_NONE, pcmk__str_none)) {
             show = all_includes(output_format);
         } else if (bit != 0) {
             show &= ~bit;
         } else {
             g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "--exclude options: all, attributes, bans, counts, dc, "
                         "failcounts, failures, fencing, fencing-failed, "
                         "fencing-pending, fencing-succeeded, maint-mode, nodes, "
                         PCMK__VALUE_NONE ", operations, options, resources, "
                         "stack, summary, tickets, times");
             result = FALSE;
             break;
         }
     }
     g_strfreev(parts);
     return result;
 }
 
 static gboolean
 apply_include(const gchar *includes, GError **error) {
     char **parts = NULL;
     gboolean result = TRUE;
 
     parts = g_strsplit(includes, ",", 0);
     for (char **s = parts; *s != NULL; s++) {
         uint32_t bit = find_section_bit(*s);
 
         if (pcmk__str_eq(*s, "all", pcmk__str_none)) {
             show = all_includes(output_format);
         } else if (pcmk__starts_with(*s, "bans")) {
             show |= pcmk_section_bans;
             if (options.neg_location_prefix != NULL) {
                 free(options.neg_location_prefix);
                 options.neg_location_prefix = NULL;
             }
 
             if (strlen(*s) > 4 && (*s)[4] == ':') {
                 options.neg_location_prefix = strdup(*s+5);
             }
         } else if (pcmk__str_any_of(*s, "default", "defaults", NULL)) {
             show |= default_includes(output_format);
         } else if (pcmk__str_eq(*s, PCMK__VALUE_NONE, pcmk__str_none)) {
             show = 0;
         } else if (bit != 0) {
             show |= bit;
         } else {
             g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "--include options: all, attributes, bans[:PREFIX], counts, dc, "
                         "default, failcounts, failures, fencing, fencing-failed, "
                         "fencing-pending, fencing-succeeded, maint-mode, nodes, "
                         PCMK__VALUE_NONE ", operations, options, resources, "
                         "stack, summary, tickets, times");
             result = FALSE;
             break;
         }
     }
     g_strfreev(parts);
     return result;
 }
 
 static gboolean
 apply_include_exclude(GSList *lst, GError **error) {
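     // Apply each saved --include/--exclude argument in the order it was given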
     gboolean rc = TRUE;
     GSList *node = lst;
 
     while (node != NULL) {
         char *s = node->data;
 
         if (pcmk__starts_with(s, "--include=")) {
             rc = apply_include(s+10, error);
         } else if (pcmk__starts_with(s, "-I=")) {
             rc = apply_include(s+3, error);
         } else if (pcmk__starts_with(s, "--exclude=")) {
             rc = apply_exclude(s+10, error);
         } else if (pcmk__starts_with(s, "-U=")) {
             rc = apply_exclude(s+3, error);
         }
 
         if (rc != TRUE) {
             break;
         }
 
         node = node->next;
     }
 
     return rc;
 }
 
 static gboolean
 user_include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     char *s = crm_strdup_printf("%s=%s", option_name, optarg);
 
     options.user_includes_excludes = g_slist_append(options.user_includes_excludes, s);
     return TRUE;
 }
 
 static gboolean
 include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     char *s = crm_strdup_printf("%s=%s", option_name, optarg);
 
     options.includes_excludes = g_slist_append(options.includes_excludes, s);
     return TRUE;
 }
 
 static gboolean
 as_cgi_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     pcmk__str_update(&args->output_ty, "html");
     output_format = mon_output_cgi;
     options.one_shot = TRUE;
     return TRUE;
 }
 
 static gboolean
 as_html_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     pcmk__str_update(&args->output_dest, optarg);
     pcmk__str_update(&args->output_ty, "html");
     output_format = mon_output_html;
     umask(S_IWGRP | S_IWOTH);
     return TRUE;
 }
 
 static gboolean
 as_simple_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     pcmk__str_update(&args->output_ty, "text");
     output_format = mon_output_monitor;
     options.one_shot = TRUE;
     return TRUE;
 }
 
 static gboolean
 as_xml_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     pcmk__str_update(&args->output_ty, "xml");
     output_format = mon_output_legacy_xml;
     return TRUE;
 }
 
 static gboolean
 fence_history_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     if (optarg == NULL) {
         interactive_fence_level = 2;
     } else {
         pcmk__scan_min_int(optarg, &interactive_fence_level, 0);
     }
 
     switch (interactive_fence_level) {
         case 3:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             return include_exclude_cb("--include", PCMK__VALUE_FENCING, data,
                                       err);
 
         case 2:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             return include_exclude_cb("--include", PCMK__VALUE_FENCING, data,
                                       err);
 
         case 1:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             return include_exclude_cb("--include", "fencing-failed,fencing-pending", data, err);
 
         case 0:
             options.fence_connect = FALSE;
             fence_history = pcmk__fence_history_none;
             return include_exclude_cb("--exclude", PCMK__VALUE_FENCING, data,
                                       err);
 
         default:
             g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Fence history must be 0-3");
             return FALSE;
     }
 }
 
 static gboolean
 group_by_node_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     show_opts |= pcmk_show_rscs_by_node;
     return TRUE;
 }
 
 static gboolean
 hide_headers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     return user_include_exclude_cb("--exclude", "summary", data, err);
 }
 
 static gboolean
 inactive_resources_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     show_opts |= pcmk_show_inactive_rscs;
     return TRUE;
 }
 
 static gboolean
 no_curses_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     output_format = mon_output_plain;
     return TRUE;
 }
 
 static gboolean
 print_brief_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     show_opts |= pcmk_show_brief;
     return TRUE;
 }
 
 static gboolean
 print_detail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     show_opts |= pcmk_show_details;
     return TRUE;
 }
 
+static gboolean
+print_description_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
+    show_opts |= pcmk_show_description;
+    return TRUE;
+}
+
 static gboolean
 print_timing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     show_opts |= pcmk_show_timing;
     return user_include_exclude_cb("--include", "operations", data, err);
 }
 
 static gboolean
 reconnect_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     int rc = crm_get_msec(optarg);
 
     if (rc == -1) {
         g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Invalid value for -i: %s", optarg);
         return FALSE;
     } else {
         options.reconnect_ms = crm_parse_interval_spec(optarg);
     }
 
     return TRUE;
 }
 
 static gboolean
 show_attributes_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     return user_include_exclude_cb("--include", "attributes", data, err);
 }
 
 static gboolean
 show_bans_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     if (optarg != NULL) {
         char *s = crm_strdup_printf("bans:%s", optarg);
         gboolean rc = user_include_exclude_cb("--include", s, data, err);
         free(s);
         return rc;
     } else {
         return user_include_exclude_cb("--include", "bans", data, err);
     }
 }
 
 static gboolean
 show_failcounts_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     return user_include_exclude_cb("--include", "failcounts", data, err);
 }
 
 static gboolean
 show_operations_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     return user_include_exclude_cb("--include", "failcounts,operations", data, err);
 }
 
 static gboolean
 show_tickets_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     return user_include_exclude_cb("--include", "tickets", data, err);
 }
 
 static gboolean
 use_cib_file_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
     setenv("CIB_file", optarg, 1);
     options.one_shot = TRUE;
     return TRUE;
 }
 
 #define INDENT "                                    "
 
 /* *INDENT-OFF* */
 static GOptionEntry addl_entries[] = {
     { "interval", 'i', 0, G_OPTION_ARG_CALLBACK, reconnect_cb,
       "Update frequency (default is 5 seconds)",
       "TIMESPEC" },
 
     { "one-shot", '1', 0, G_OPTION_ARG_NONE, &options.one_shot,
       "Display the cluster status once on the console and exit",
       NULL },
 
     { "daemonize", 'd', 0, G_OPTION_ARG_NONE, &options.daemonize,
       "Run in the background as a daemon.\n"
       INDENT "Requires at least one of --output-to and --external-agent.",
       NULL },
 
     { "pid-file", 'p', 0, G_OPTION_ARG_FILENAME, &options.pid_file,
       "(Advanced) Daemon pid file location",
       "FILE" },
 
     { "external-agent", 'E', 0, G_OPTION_ARG_FILENAME, &options.external_agent,
       "A program to run when resource operations take place",
       "FILE" },
 
     { "external-recipient", 'e', 0, G_OPTION_ARG_STRING, &options.external_recipient,
       "A recipient for your program (assuming you want the program to send something to someone).",
       "RCPT" },
 
     { "watch-fencing", 'W', 0, G_OPTION_ARG_NONE, &options.watch_fencing,
       "Listen for fencing events. For use with --external-agent.",
       NULL },
 
     { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, use_cib_file_cb,
       NULL,
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry display_entries[] = {
     { "include", 'I', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
       "A list of sections to include in the output.\n"
       INDENT "See `Output Control` help for more information.",
       "SECTION(s)" },
 
     { "exclude", 'U', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
       "A list of sections to exclude from the output.\n"
       INDENT "See `Output Control` help for more information.",
       "SECTION(s)" },
 
     { "node", 0, 0, G_OPTION_ARG_STRING, &options.only_node,
       "When displaying information about nodes, show only what's related to the given\n"
       INDENT "node, or to all nodes tagged with the given tag",
       "NODE" },
 
     { "resource", 0, 0, G_OPTION_ARG_STRING, &options.only_rsc,
       "When displaying information about resources, show only what's related to the given\n"
       INDENT "resource, or to all resources tagged with the given tag",
       "RSC" },
 
     { "group-by-node", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, group_by_node_cb,
       "Group resources by node",
       NULL },
 
     { "inactive", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, inactive_resources_cb,
       "Display inactive resources",
       NULL },
 
     { "failcounts", 'f', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_failcounts_cb,
       "Display resource fail counts",
       NULL },
 
     { "operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_operations_cb,
       "Display resource operation history",
       NULL },
 
     { "timing-details", 't', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_timing_cb,
       "Display resource operation history with timing details",
       NULL },
 
     { "tickets", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_tickets_cb,
       "Display cluster tickets",
       NULL },
 
     { "fence-history", 'm', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, fence_history_cb,
       "Show fence history:\n"
       INDENT "0=off, 1=failures and pending (default without option),\n"
       INDENT "2=add successes (default without value for option),\n"
       INDENT "3=show full history without reduction to most recent of each flavor",
       "LEVEL" },
 
     { "neg-locations", 'L', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, show_bans_cb,
       "Display negative location constraints [optionally filtered by id prefix]",
       NULL },
 
     { "show-node-attributes", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_attributes_cb,
       "Display node attributes",
       NULL },
 
     { "hide-headers", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, hide_headers_cb,
       "Hide all headers",
       NULL },
 
     { "show-detail", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_detail_cb,
       "Show more details (node IDs, individual clone instances)",
       NULL },
 
+    { "show-description", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_description_cb,
+      "Show resource descriptions",
+      NULL },
+
     { "brief", 'b', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_brief_cb,
       "Brief output",
       NULL },
 
     { "pending", 'j', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.print_pending,
       "Display pending state if 'record-pending' is enabled",
       NULL },
 
     { "simple-status", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_simple_cb,
       "Display the cluster status once as a simple one line output (suitable for nagios)",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry deprecated_entries[] = {
     { "as-html", 'h', G_OPTION_FLAG_FILENAME, G_OPTION_ARG_CALLBACK, as_html_cb,
       "Write cluster status to the named HTML file.\n"
       INDENT "Use --output-as=html --output-to=FILE instead.",
       "FILE" },
 
     { "as-xml", 'X', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_xml_cb,
       "Write cluster status as XML to stdout. This will enable one-shot mode.\n"
       INDENT "Use --output-as=xml instead.",
       NULL },
 
     { "disable-ncurses", 'N', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, no_curses_cb,
       "Disable the use of ncurses.\n"
       INDENT "Use --output-as=text instead.",
       NULL },
 
     { "web-cgi", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_cgi_cb,
       "Web mode with output suitable for CGI (preselected when run as *.cgi).\n"
       INDENT "Use --output-as=html --html-cgi instead.",
       NULL },
 
     { NULL }
 };
 /* *INDENT-ON* */
 
 /* Reconnect to the CIB and fencing agent after reconnect_ms has passed.  This sounds
  * like it would be more broadly useful, but only ever happens after a disconnect via
  * mon_cib_connection_destroy.
  */
 static gboolean
 reconnect_after_timeout(gpointer data)
 {
 #if CURSES_ENABLED
     if (output_format == mon_output_console) {
         clear();
         refresh();
     }
 #endif
 
     out->transient(out, "Reconnecting...");
     if (setup_api_connections() == pcmk_rc_ok) {
         // Trigger redrawing the screen (needs reconnect_timer == 0)
         reconnect_timer = 0;
         refresh_after_event(FALSE, TRUE);
         return G_SOURCE_REMOVE;
     }
 
     out->message(out, "crm-mon-disconnected",
                  "Latest connection attempt failed", pcmkd_state);
 
     reconnect_timer = g_timeout_add(options.reconnect_ms,
                                     reconnect_after_timeout, NULL);
     return G_SOURCE_REMOVE;
 }
 
 /* Called from various places when we are disconnected from the CIB or from the
  * fencing agent.  If the CIB connection is still valid, this function will also
  * attempt to sign off and reconnect.
  */
 static void
 mon_cib_connection_destroy(gpointer user_data)
 {
     const char *msg = "Connection to the cluster lost";
 
     pcmkd_state = pcmk_pacemakerd_state_invalid;
 
     /* No crm-mon-disconnected message for console; a working implementation
      * is not currently worth the effort
      */
     out->transient(out, "%s", msg);
 
     out->message(out, "crm-mon-disconnected", msg, pcmkd_state);
 
     if (refresh_timer != NULL) {
         /* we'll trigger a refresh after reconnect */
         mainloop_timer_stop(refresh_timer);
     }
     if (reconnect_timer) {
         /* we'll trigger a new reconnect-timeout at the end */
         g_source_remove(reconnect_timer);
         reconnect_timer = 0;
     }
 
     /* the client API won't properly reconnect notifications if they are still
      * in the table - so remove them
      */
     stonith_api_delete(st);
     st = NULL;
 
     if (cib) {
         cib->cmds->signoff(cib);
         reconnect_timer = g_timeout_add(options.reconnect_ms,
                                         reconnect_after_timeout, NULL);
     }
 }
 
 /* Signal handler installed into the mainloop for normal program shutdown */
 static void
 mon_shutdown(int nsig)
 {
     clean_up(CRM_EX_OK);
 }
 
 #if CURSES_ENABLED
 static volatile sighandler_t ncurses_winch_handler;
 
 /* Signal handler installed the regular way (not into the main loop) for when
  * the screen is resized.  Commonly, this happens when running in an xterm and
  * the user changes its size.
  */
 static void
 mon_winresize(int nsig)
 {
     static int not_done;
     int lines = 0, cols = 0;
 
     if (!not_done++) {
         if (ncurses_winch_handler)
             /* the original ncurses WINCH signal handler does the
              * magic of retrieving the new window size;
              * otherwise, we'd have to use ioctl or tgetent */
             (*ncurses_winch_handler) (SIGWINCH);
         getmaxyx(stdscr, lines, cols);
         resizeterm(lines, cols);
         /* Alert the mainloop code we'd like the refresh_trigger to run next
          * time the mainloop gets around to checking.
          */
         mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
     }
     not_done--;
 }
 #endif
 
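 /* Create and connect a fencer API object if fencing information was requested,
  * registering for live fence events (--watch-fencing, forwarded to any external
  * agent) or for fence history updates (the default, which trigger a redraw).
  */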
 static int
 setup_fencer_connection(void)
 {
     int rc = pcmk_ok;
 
     if (options.fence_connect && st == NULL) {
         st = stonith_api_new();
     }
 
     if (!options.fence_connect || st == NULL || st->state != stonith_disconnected) {
         return rc;
     }
 
     rc = st->cmds->connect(st, crm_system_name, NULL);
     if (rc == pcmk_ok) {
         crm_trace("Setting up stonith callbacks");
         if (options.watch_fencing) {
             st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
                                             mon_st_callback_event);
             st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event);
         } else {
             st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
                                             mon_st_callback_display);
             st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display);
         }
     } else {
         stonith_api_delete(st);
         st = NULL;
     }
 
     return rc;
 }
 
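 /* Sign on to the CIB, fetch an initial copy, and register callbacks for connection
  * loss and CIB diff notifications.  If diff notifications cannot be registered,
  * there is nothing to monitor, so the CIB and fencer connections are torn down.
  */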
 static int
 setup_cib_connection(void)
 {
     int rc = pcmk_rc_ok;
 
     CRM_CHECK(cib != NULL, return EINVAL);
 
     if (cib->state != cib_disconnected) {
         // Already connected with notifications registered for CIB updates
         return rc;
     }
 
     rc = cib__signon_query(out, &cib, &current_cib);
 
     if (rc == pcmk_rc_ok) {
         rc = pcmk_legacy2rc(cib->cmds->set_connection_dnotify(cib,
             mon_cib_connection_destroy));
         if (rc == EPROTONOSUPPORT) {
             out->err(out,
                      "CIB client does not support connection loss "
                      "notifications; crm_mon will be unable to reconnect after "
                      "connection loss");
             rc = pcmk_rc_ok;
         }
 
         if (rc == pcmk_rc_ok) {
             cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY,
                                            crm_diff_update);
             rc = pcmk_legacy2rc(cib->cmds->add_notify_callback(cib,
                                     T_CIB_DIFF_NOTIFY, crm_diff_update));
         }
 
         if (rc != pcmk_rc_ok) {
             if (rc == EPROTONOSUPPORT) {
                 out->err(out,
                          "CIB client does not support CIB diff "
                          "notifications");
             } else {
                 out->err(out, "CIB diff notification setup failed");
             }
 
             out->err(out, "Cannot monitor CIB changes; exiting");
             cib__clean_up_connection(&cib);
             stonith_api_delete(st);
             st = NULL;
         }
     }
     return rc;
 }
 
 /* This is used to set up the fencing options after the interactive UI has been started.
  * fence_history_cb can't be used because it builds up a list of includes/excludes that
  * would then have to be processed with apply_include_exclude, which could affect other
  * things.
  */
 static void
 set_fencing_options(int level)
 {
     switch (level) {
         case 3:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             show |= pcmk_section_fencing_all;
             break;
 
         case 2:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             show |= pcmk_section_fencing_all;
             break;
 
         case 1:
             options.fence_connect = TRUE;
             fence_history = pcmk__fence_history_full;
             show |= pcmk_section_fence_failed | pcmk_section_fence_pending;
             break;
 
         default:
             interactive_fence_level = 0;
             options.fence_connect = FALSE;
             fence_history = pcmk__fence_history_none;
             show &= ~pcmk_section_fencing_all;
             break;
     }
 }
 
 static int
 setup_api_connections(void)
 {
     int rc = pcmk_rc_ok;
 
     CRM_CHECK(cib != NULL, return EINVAL);
 
     if (cib->state != cib_disconnected) {
         return rc;
     }
 
     if (cib->variant == cib_native) {
         rc = pcmk__pacemakerd_status(out, crm_system_name,
                                      options.reconnect_ms / 2, false,
                                      &pcmkd_state);
         if (rc != pcmk_rc_ok) {
             return rc;
         }
 
         switch (pcmkd_state) {
             case pcmk_pacemakerd_state_running:
             case pcmk_pacemakerd_state_remote:
             case pcmk_pacemakerd_state_shutting_down:
                 /* Fencer and CIB may still be available while shutting down or
                  * running on a Pacemaker Remote node
                  */
                 break;
             default:
                 // Fencer and CIB are definitely unavailable
                 return ENOTCONN;
         }
 
         setup_fencer_connection();
     }
 
     rc = setup_cib_connection();
     return rc;
 }
 
 #if CURSES_ENABLED
 static const char *
 get_option_desc(char c)
 {
     const char *desc = "No help available";
 
     // Stop at the terminating all-NULL entry
     for (GOptionEntry *entry = display_entries; entry->long_name != NULL; entry++) {
         if (entry->short_name == c) {
             desc = entry->description;
             break;
         }
     }
     return desc;
 }
 
 #define print_option_help(out, option, condition) \
     curses_formatted_printf(out, "%c %c: \t%s\n", ((condition)? '*': ' '), option, get_option_desc(option));
 
 /* This function is called from the main loop when there is something to be read
  * on stdin, like an interactive user's keystroke.  All it does is read the keystroke,
  * set flags (or show the page showing which keystrokes are valid), and redraw the
  * screen.  It does not do anything with connections to the CIB or the fencer;
  * that happens in mon_refresh_display.
  */
 static gboolean
 detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data)
 {
     int c;
     gboolean config_mode = FALSE;
 
     while (1) {
 
         /* Get user input */
         c = getchar();
 
         switch (c) {
             case 'm':
                 interactive_fence_level++;
                 if (interactive_fence_level > 3) {
                     interactive_fence_level = 0;
                 }
 
                 set_fencing_options(interactive_fence_level);
                 break;
             case 'c':
                 show ^= pcmk_section_tickets;
                 break;
             case 'f':
                 show ^= pcmk_section_failcounts;
                 break;
             case 'n':
                 show_opts ^= pcmk_show_rscs_by_node;
                 break;
             case 'o':
                 show ^= pcmk_section_operations;
                 if (!pcmk_is_set(show, pcmk_section_operations)) {
                     show_opts &= ~pcmk_show_timing;
                 }
                 break;
             case 'r':
                 show_opts ^= pcmk_show_inactive_rscs;
                 break;
             case 'R':
                 show_opts ^= pcmk_show_details;
 #ifdef PCMK__COMPAT_2_0
                 // Keep failed action output the same as 2.0.x
                 show_opts |= pcmk_show_failed_detail;
 #endif
                 break;
             case 't':
                 show_opts ^= pcmk_show_timing;
                 if (pcmk_is_set(show_opts, pcmk_show_timing)) {
                     show |= pcmk_section_operations;
                 }
                 break;
             case 'A':
                 show ^= pcmk_section_attributes;
                 break;
             case 'L':
                 show ^= pcmk_section_bans;
                 break;
             case 'D':
                 /* If any header is shown, clear them all, otherwise set them all */
                 if (pcmk_any_flags_set(show, pcmk_section_summary)) {
                     show &= ~pcmk_section_summary;
                 } else {
                     show |= pcmk_section_summary;
                 }
                 /* Regardless, we don't show options in console mode. */
                 show &= ~pcmk_section_options;
                 break;
             case 'b':
                 show_opts ^= pcmk_show_brief;
                 break;
             case 'j':
                 show_opts ^= pcmk_show_pending;
                 break;
             case '?':
                 config_mode = TRUE;
                 break;
             default:
                 /* All other keys just redraw the screen. */
                 goto refresh;
         }
 
         if (!config_mode)
             goto refresh;
 
         clear();
         refresh();
 
         curses_formatted_printf(out, "%s", "Display option change mode\n");
         print_option_help(out, 'c', pcmk_is_set(show, pcmk_section_tickets));
         print_option_help(out, 'f', pcmk_is_set(show, pcmk_section_failcounts));
         print_option_help(out, 'n', pcmk_is_set(show_opts, pcmk_show_rscs_by_node));
         print_option_help(out, 'o', pcmk_is_set(show, pcmk_section_operations));
         print_option_help(out, 'r', pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
         print_option_help(out, 't', pcmk_is_set(show_opts, pcmk_show_timing));
         print_option_help(out, 'A', pcmk_is_set(show, pcmk_section_attributes));
         print_option_help(out, 'L', pcmk_is_set(show, pcmk_section_bans));
         print_option_help(out, 'D', !pcmk_is_set(show, pcmk_section_summary));
 #ifdef PCMK__COMPAT_2_0
         print_option_help(out, 'R', pcmk_any_flags_set(show_opts, pcmk_show_details & ~pcmk_show_failed_detail));
 #else
         print_option_help(out, 'R', pcmk_any_flags_set(show_opts, pcmk_show_details));
 #endif
         print_option_help(out, 'b', pcmk_is_set(show_opts, pcmk_show_brief));
         print_option_help(out, 'j', pcmk_is_set(show_opts, pcmk_show_pending));
         curses_formatted_printf(out, "%d m: \t%s\n", interactive_fence_level, get_option_desc('m'));
         curses_formatted_printf(out, "%s", "\nToggle fields via field letter, type any other key to return\n");
     }
 
 refresh:
     refresh_after_event(FALSE, TRUE);
 
     return TRUE;
 }
 #endif  // CURSES_ENABLED
 
 // Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag
 static void
 avoid_zombies(void)
 {
     struct sigaction sa;
 
     memset(&sa, 0, sizeof(struct sigaction));
     if (sigemptyset(&sa.sa_mask) < 0) {
         crm_warn("Cannot avoid zombies: %s", pcmk_rc_str(errno));
         return;
     }
     sa.sa_handler = SIG_IGN;
     sa.sa_flags = SA_RESTART|SA_NOCLDWAIT;
     if (sigaction(SIGCHLD, &sa, NULL) < 0) {
         crm_warn("Cannot avoid zombies: %s", pcmk_rc_str(errno));
     }
 }
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
 
         { NULL }
     };
 
     const char *description = "Notes:\n\n"
                               "If this program is called as crm_mon.cgi, --output-as=html --html-cgi will\n"
                               "automatically be added to the command line arguments.\n\n"
                               "Time Specification:\n\n"
                               "The TIMESPEC in any command line option can be specified in many different\n"
                               "formats.  It can be just an integer number of seconds, a number plus units\n"
                               "(ms/msec/us/usec/s/sec/m/min/h/hr), or an ISO 8601 period specification.\n\n"
                               "Output Control:\n\n"
                               "By default, a certain list of sections is written to the output destination.\n"
                               "The default varies based on the output format - XML includes everything, while\n"
                               "other output formats will display less.  This list can be modified with the\n"
                               "--include and --exclude command line options.  Each option may be given multiple\n"
                               "times on the command line, and each can give a comma-separated list of sections.\n"
                               "The options are applied to the default set, from left to right as seen on the\n"
                               "command line.  For a list of valid sections, pass --include=list or --exclude=list.\n\n"
                               "Interactive Use:\n\n"
                               "When run interactively, crm_mon can be told to hide and display various sections\n"
                               "of output.  To see a help screen explaining the options, hit '?'.  Any key stroke\n"
                               "aside from those listed will cause the screen to refresh.\n\n"
                               "Examples:\n\n"
                               "Display the cluster status on the console with updates as they occur:\n\n"
                               "\tcrm_mon\n\n"
                               "Display the cluster status on the console just once then exit:\n\n"
                               "\tcrm_mon -1\n\n"
                               "Display your cluster status, group resources by node, and include inactive resources in the list:\n\n"
                               "\tcrm_mon --group-by-node --inactive\n\n"
                               "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:\n\n"
                               "\tcrm_mon --daemonize --output-as html --output-to /path/to/docroot/filename.html\n\n"
                               "Start crm_mon and export the current cluster status as XML to stdout, then exit:\n\n"
                               "\tcrm_mon --output-as xml\n\n";
 
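     /* An illustrative (not exhaustive) example of the left-to-right semantics
      * described above:
      *   crm_mon -1 --include=tickets,operations --exclude=summary
      * starts from the default section list for the chosen output format, adds
      * the tickets and operations sections, and then hides all the headers.
      */
 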
 #if CURSES_ENABLED
     context = pcmk__build_arg_context(args, "console (default), html, text, xml", group, NULL);
 #else
     context = pcmk__build_arg_context(args, "text (default), html, xml", group, NULL);
 #endif
     pcmk__add_main_args(context, extra_prog_entries);
     g_option_context_set_description(context, description);
 
     pcmk__add_arg_group(context, "display", "Display Options:",
                         "Show display options", display_entries);
     pcmk__add_arg_group(context, "additional", "Additional Options:",
                         "Show additional options", addl_entries);
     pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
                         "Show deprecated options", deprecated_entries);
 
     return context;
 }
 
 /* If certain format options were specified, we want to set some extra
  * options.  We can just process these like they were given on the
  * command line.
  */
 static void
 add_output_args(void) {
     GError *err = NULL;
 
     if (output_format == mon_output_plain) {
         if (!pcmk__force_args(context, &err, "%s --text-fancy", g_get_prgname())) {
             g_propagate_error(&error, err);
             clean_up(CRM_EX_USAGE);
         }
     } else if (output_format == mon_output_cgi) {
         if (!pcmk__force_args(context, &err, "%s --html-cgi", g_get_prgname())) {
             g_propagate_error(&error, err);
             clean_up(CRM_EX_USAGE);
         }
     } else if (output_format == mon_output_xml) {
         if (!pcmk__force_args(context, &err, "%s --xml-simple-list --xml-substitute", g_get_prgname())) {
             g_propagate_error(&error, err);
             clean_up(CRM_EX_USAGE);
         }
     } else if (output_format == mon_output_legacy_xml) {
         output_format = mon_output_xml;
         if (!pcmk__force_args(context, &err, "%s --xml-legacy --xml-substitute", g_get_prgname())) {
             g_propagate_error(&error, err);
             clean_up(CRM_EX_USAGE);
         }
     }
 }
 
 /* Which output format to use could come from two places:  The --as-xml
  * style arguments we gave in deprecated_entries above, or the formatted output
  * arguments added by pcmk__register_formats.  If the latter were used,
  * output_format will be mon_output_unset.
  *
  * Call the callbacks as if those older style arguments were provided so
  * the various things they do get done.
  */
 static void
 reconcile_output_format(pcmk__common_args_t *args) {
     gboolean retval = TRUE;
     GError *err = NULL;
 
     if (output_format != mon_output_unset) {
         return;
     }
 
     if (pcmk__str_eq(args->output_ty, "html", pcmk__str_casei)) {
         char *dest = NULL;
 
         pcmk__str_update(&dest, args->output_dest);
         retval = as_html_cb("h", dest, NULL, &err);
         free(dest);
     } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_casei)) {
         retval = no_curses_cb("N", NULL, NULL, &err);
     } else if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_casei)) {
         pcmk__str_update(&args->output_ty, "xml");
         output_format = mon_output_xml;
     } else if (options.one_shot) {
         pcmk__str_update(&args->output_ty, "text");
         output_format = mon_output_plain;
     } else if (!options.daemonize && args->output_dest != NULL) {
         options.one_shot = TRUE;
         pcmk__str_update(&args->output_ty, "text");
         output_format = mon_output_plain;
     } else {
         /* Neither old nor new arguments were given, so set the default. */
         pcmk__str_update(&args->output_ty, "console");
         output_format = mon_output_console;
     }
 
     if (!retval) {
         g_propagate_error(&error, err);
         clean_up(CRM_EX_USAGE);
     }
 }
 
 static void
 clean_up_on_connection_failure(int rc)
 {
     if (output_format == mon_output_monitor) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s",
                     pcmk_rc_str(rc));
         clean_up(MON_STATUS_CRIT);
     } else if (rc == ENOTCONN) {
         if (pcmkd_state == pcmk_pacemakerd_state_remote) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: remote-node not connected to cluster");
         } else {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node");
         }
     } else {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_rc_str(rc));
     }
 
     clean_up(pcmk_rc2exitc(rc));
 }
 
 static void
 one_shot(void)
 {
     int rc = pcmk__status(out, cib, fence_history, show, show_opts,
                           options.only_node, options.only_rsc,
                           options.neg_location_prefix,
                           output_format == mon_output_monitor, 0);
 
     if (rc == pcmk_rc_ok) {
         clean_up(pcmk_rc2exitc(rc));
     } else {
         clean_up_on_connection_failure(rc);
     }
 }
 
 static void
 exit_on_invalid_cib(void)
 {
     if (cib != NULL) {
         return;
     }
 
     // Shouldn't really be possible
     g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Invalid CIB source");
     clean_up(CRM_EX_ERROR);
 }
 
 int
 main(int argc, char **argv)
 {
     int rc = pcmk_rc_ok;
     GOptionGroup *output_group = NULL;
 
     args = pcmk__new_common_args(SUMMARY);
     context = build_arg_context(args, &output_group);
     pcmk__register_formats(output_group, formats);
 
     options.pid_file = strdup("/tmp/ClusterMon.pid");
     pcmk__cli_init_logging("crm_mon", 0);
 
     // Avoid needing to wait for subprocesses forked for -E/--external-agent
     avoid_zombies();
 
     if (pcmk__ends_with_ext(argv[0], ".cgi")) {
         output_format = mon_output_cgi;
         options.one_shot = TRUE;
     }
 
     processed_args = pcmk__cmdline_preproc(argv, "ehimpxEILU");
 
     fence_history_cb("--fence-history", "1", NULL, NULL);
 
     /* Set an HTML title regardless of what format we will eventually use.  This can't
      * be done in add_output_args.  That function is called after command line
      * arguments are processed in the next block, which means it'll override whatever
      * title the user provides.  Doing this here means the user can give their own
      * title on the command line.
      */
     if (!pcmk__force_args(context, &error, "%s --html-title \"Cluster Status\"",
                           g_get_prgname())) {
         return clean_up(CRM_EX_USAGE);
     }
 
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         return clean_up(CRM_EX_USAGE);
     }
 
     for (int i = 0; i < args->verbosity; i++) {
         crm_bump_log_level(argc, argv);
     }
 
     if (!args->version) {
         if (args->quiet) {
             include_exclude_cb("--exclude", "times", NULL, NULL);
         }
 
         if (options.watch_fencing) {
             fence_history_cb("--fence-history", "0", NULL, NULL);
             options.fence_connect = TRUE;
         }
 
         /* create the cib-object early to be able to do further
          * decisions based on the cib-source
          */
         cib = cib_new();
 
         exit_on_invalid_cib();
 
         switch (cib->variant) {
             case cib_native:
                 // Everything (fencer, CIB, pcmkd status) should be available
                 break;
 
             case cib_file:
                 // Live fence history is not meaningful
                 fence_history_cb("--fence-history", "0", NULL, NULL);
 
                 /* Notifications are unsupported; nothing to monitor
                  * @COMPAT: Let setup_cib_connection() handle this by exiting?
                  */
                 options.one_shot = TRUE;
                 break;
 
             case cib_remote:
                 // We won't receive any fencing updates
                 fence_history_cb("--fence-history", "0", NULL, NULL);
                 break;
 
             case cib_undefined:
             case cib_database:
             default:
                 /* something is odd */
                 exit_on_invalid_cib();
                 break;
         }
 
         if (options.one_shot) {
             if (output_format == mon_output_console) {
                 output_format = mon_output_plain;
             }
 
         } else if (options.daemonize) {
             if (pcmk__str_eq(args->output_dest, "-", pcmk__str_null_matches|pcmk__str_casei) &&
                 !options.external_agent) {
                 g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                             "--daemonize requires at least one of --output-to and --external-agent");
                 return clean_up(CRM_EX_USAGE);
             }
 
         } else if (output_format == mon_output_console) {
 #if CURSES_ENABLED
             crm_enable_stderr(FALSE);
 #else
             options.one_shot = TRUE;
             output_format = mon_output_plain;
             printf("Defaulting to one-shot mode\n");
             printf("You need to have curses available at compile time to enable console mode\n");
 #endif
         }
     }
 
     reconcile_output_format(args);
     add_output_args();
 
     /* output_format MUST NOT BE CHANGED AFTER THIS POINT. */
 
     if (args->version && output_format == mon_output_console) {
         /* Use the text output format here if we are in curses mode but were given
          * --version.  Displaying version information uses printf, and then we
          *  immediately exit.  We don't want to initialize curses for that.
          */
         rc = pcmk__output_new(&out, "text", args->output_dest, argv);
     } else {
         rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     }
 
     if (rc != pcmk_rc_ok) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         return clean_up(CRM_EX_ERROR);
     }
 
     if (options.daemonize) {
         if (!options.external_agent && (output_format == mon_output_console ||
                                         output_format == mon_output_unset ||
                                         output_format == mon_output_none)) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "--daemonize requires --output-as=[html|text|xml]");
             return clean_up(CRM_EX_USAGE);
         }
 
         crm_enable_stderr(FALSE);
 
         cib_delete(cib);
         cib = NULL;
         pcmk__daemonize(crm_system_name, options.pid_file);
         cib = cib_new();
         exit_on_invalid_cib();
     }
 
     show = default_includes(output_format);
 
     /* Apply the --include/--exclude flags we added internally.  There's no error
      * reporting here because any failure would be a programming error.
      */
     apply_include_exclude(options.includes_excludes, &error);
 
     /* And now apply any --include/--exclude flags the user gave on the command line.
      * These are done in a separate pass from the internal ones because we want to
      * make sure whatever the user specifies overrides whatever we do.
      */
     if (!apply_include_exclude(options.user_includes_excludes, &error)) {
         return clean_up(CRM_EX_USAGE);
     }
 
     /* Sync up the initial value of interactive_fence_level with whatever was set with
      * --include/--exclude= options.
      */
     if (pcmk_all_flags_set(show, pcmk_section_fencing_all)) {
         interactive_fence_level = 3;
     } else if (pcmk_is_set(show, pcmk_section_fence_worked)) {
         interactive_fence_level = 2;
     } else if (pcmk_any_flags_set(show, pcmk_section_fence_failed | pcmk_section_fence_pending)) {
         interactive_fence_level = 1;
     } else {
         interactive_fence_level = 0;
     }
 
     pcmk__register_lib_messages(out);
     crm_mon_register_messages(out);
     pe__register_messages(out);
     stonith__register_messages(out);
 
     // Messages internal to this file, nothing curses-specific
     pcmk__register_messages(out, fmt_functions);
 
     if (args->version) {
         out->version(out, false);
         return clean_up(CRM_EX_OK);
     }
 
     /* Extra sanity checks when in CGI mode */
     if (output_format == mon_output_cgi) {
         if (cib->variant == cib_file) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode used with CIB file");
             return clean_up(CRM_EX_USAGE);
         } else if (options.external_agent != NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with --external-agent");
             return clean_up(CRM_EX_USAGE);
         } else if (options.daemonize == TRUE) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with -d");
             return clean_up(CRM_EX_USAGE);
         }
     }
 
     if (output_format == mon_output_xml || output_format == mon_output_legacy_xml) {
         show_opts |= pcmk_show_inactive_rscs | pcmk_show_timing;
 
         if (!options.daemonize) {
             options.one_shot = TRUE;
         }
     }
 
     if ((output_format == mon_output_html || output_format == mon_output_cgi) &&
         out->dest != stdout) {
         pcmk__html_add_header("meta", "http-equiv", "refresh", "content",
                               pcmk__itoa(options.reconnect_ms / 1000), NULL);
     }
 
 #ifdef PCMK__COMPAT_2_0
     // Keep failed action output the same as 2.0.x
     show_opts |= pcmk_show_failed_detail;
 #endif
 
     crm_info("Starting %s", crm_system_name);
 
     cib__set_output(cib, out);
 
     if (options.one_shot) {
         one_shot();
     }
 
     out->message(out, "crm-mon-disconnected",
                  "Waiting for initial connection", pcmkd_state);
     do {
         out->transient(out, "Connecting to cluster...");
         rc = setup_api_connections();
 
         if (rc != pcmk_rc_ok) {
             if ((rc == ENOTCONN) || (rc == ECONNREFUSED)) {
                 out->transient(out, "Connection failed. Retrying in %ums...",
                                options.reconnect_ms);
             }
 
             // Give some time to view all output even if we won't retry
             pcmk__sleep_ms(options.reconnect_ms);
 #if CURSES_ENABLED
             if (output_format == mon_output_console) {
                 clear();
                 refresh();
             }
 #endif
         }
     } while ((rc == ENOTCONN) || (rc == ECONNREFUSED));
 
     if (rc != pcmk_rc_ok) {
         clean_up_on_connection_failure(rc);
     }
 
     set_fencing_options(interactive_fence_level);
     mon_refresh_display(NULL);
 
     mainloop = g_main_loop_new(NULL, FALSE);
 
     mainloop_add_signal(SIGTERM, mon_shutdown);
     mainloop_add_signal(SIGINT, mon_shutdown);
 #if CURSES_ENABLED
     if (output_format == mon_output_console) {
         ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize);
         if (ncurses_winch_handler == SIG_DFL ||
             ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
             ncurses_winch_handler = NULL;
 
         io_channel = g_io_channel_unix_new(STDIN_FILENO);
         g_io_add_watch(io_channel, G_IO_IN, detect_user_input, NULL);
     }
 #endif
 
     /* When refresh_trigger->trigger is set to TRUE, call mon_refresh_display.  In
      * this file, that is anywhere mainloop_set_trigger is called.
      */
     refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);
 
     g_main_loop_run(mainloop);
     g_main_loop_unref(mainloop);
 
     if (io_channel != NULL) {
         g_io_channel_shutdown(io_channel, TRUE, NULL);
     }
 
     crm_info("Exiting %s", crm_system_name);
 
     return clean_up(CRM_EX_OK);
 }
 
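 /* Export the details of a resource operation through CRM_notify_* environment
  * variables, then fork and exec the external agent (-E/--external-agent) to
  * deliver the notification.  The child is not waited for; avoid_zombies()
  * ensures it will not linger as a zombie.
  */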
 static int
 send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
                  int status, const char *desc)
 {
     pid_t pid;
 
     /* setenv() needs strings, but these values are ints */
     char *rc_s = pcmk__itoa(rc);
     char *status_s = pcmk__itoa(status);
     char *target_rc_s = pcmk__itoa(target_rc);
 
     crm_debug("Sending external notification to '%s' via '%s'", options.external_recipient, options.external_agent);
 
     if(rsc) {
         setenv("CRM_notify_rsc", rsc, 1);
     }
     if (options.external_recipient) {
         setenv("CRM_notify_recipient", options.external_recipient, 1);
     }
     setenv("CRM_notify_node", node, 1);
     setenv("CRM_notify_task", task, 1);
     setenv("CRM_notify_desc", desc, 1);
     setenv("CRM_notify_rc", rc_s, 1);
     setenv("CRM_notify_target_rc", target_rc_s, 1);
     setenv("CRM_notify_status", status_s, 1);
 
     pid = fork();
     if (pid == -1) {
         crm_perror(LOG_ERR, "notification fork() failed.");
     }
     if (pid == 0) {
         /* crm_debug("notification: I am the child. Executing the notification program."); */
         execl(options.external_agent, options.external_agent, NULL);
         exit(CRM_EX_ERROR);
     }
 
     crm_trace("Finished running custom notification program '%s'.", options.external_agent);
     free(target_rc_s);
     free(status_s);
     free(rc_s);
     return 0;
 }
 
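 /* Recursively walk an XML subtree looking for lrm_rsc_op elements.  For each one,
  * decode its transition magic to recover the operation's status and return codes,
  * log the outcome, and (unless it is an expected "not running" result) pass it to
  * send_custom_trap() when an external agent is configured.
  */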
 static int
 handle_rsc_op(xmlNode *xml, void *userdata)
 {
     const char *node_id = (const char *) userdata;
     int rc = -1;
     int status = -1;
     int target_rc = -1;
     gboolean notify = TRUE;
 
     char *rsc = NULL;
     char *task = NULL;
     const char *desc = NULL;
     const char *magic = NULL;
     const char *id = NULL;
     const char *node = NULL;
 
     xmlNode *n = xml;
     xmlNode * rsc_op = xml;
 
     if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) {
         pcmk__xe_foreach_child(xml, NULL, handle_rsc_op, (void *) node_id);
         return pcmk_rc_ok;
     }
 
     id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY);
     if (id == NULL) {
         /* Compatibility with <= 1.1.5 */
         id = ID(rsc_op);
     }
 
     magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
     if (magic == NULL) {
         /* non-change */
         return pcmk_rc_ok;
     }
 
     if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc,
                                  &target_rc)) {
         crm_err("Invalid event %s detected for %s", magic, id);
         return pcmk_rc_ok;
     }
 
     if (parse_op_key(id, &rsc, &task, NULL) == FALSE) {
         crm_err("Invalid event detected for %s", id);
         goto bail;
     }
 
     node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);
 
     while (n != NULL && !pcmk__str_eq(XML_CIB_TAG_STATE, TYPE(n), pcmk__str_casei)) {
         n = n->parent;
     }
 
     if(node == NULL && n) {
         node = crm_element_value(n, XML_ATTR_UNAME);
     }
 
     if (node == NULL && n) {
         node = ID(n);
     }
 
     if (node == NULL) {
         node = node_id;
     }
 
     if (node == NULL) {
         crm_err("No node detected for event %s (%s)", magic, id);
         goto bail;
     }
 
     /* look up where we expected it to be? */
     desc = pcmk_rc_str(pcmk_rc_ok);
     if ((status == PCMK_EXEC_DONE) && (target_rc == rc)) {
         crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
         if (rc == PCMK_OCF_NOT_RUNNING) {
             notify = FALSE;
         }
 
     } else if (status == PCMK_EXEC_DONE) {
         desc = services_ocf_exitcode_str(rc);
         crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
 
     } else {
         desc = pcmk_exec_status_str(status);
         crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
     }
 
     if (notify && options.external_agent) {
         send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
     }
 
   bail:
     free(rsc);
     free(task);
     return pcmk_rc_ok;
 }
 
 /* This function is just a wrapper around mainloop_set_trigger so that it can be
  * called from a mainloop directly.  It's simply another way of ensuring the screen
  * gets redrawn.
  */
 static gboolean
 mon_trigger_refresh(gpointer user_data)
 {
     mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
     return FALSE;
 }
 
 static int
 handle_op_for_node(xmlNode *xml, void *userdata)
 {
     const char *node = crm_element_value(xml, XML_ATTR_UNAME);
 
     if (node == NULL) {
         node = ID(xml);
     }
 
     handle_rsc_op(xml, (void *) node);
     return pcmk_rc_ok;
 }
 
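 /* Process a v2 (path-based) CIB diff: for each change that could contain resource
  * operations, work out which node it applies to (from the change's content or its
  * XPath) and hand the relevant XML to handle_rsc_op().
  */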
 static void
 crm_diff_update_v2(const char *event, xmlNode * msg)
 {
     xmlNode *change = NULL;
     xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     for (change = pcmk__xml_first_child(diff); change != NULL;
          change = pcmk__xml_next(change)) {
         const char *name = NULL;
         const char *op = crm_element_value(change, XML_DIFF_OP);
         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
         xmlNode *match = NULL;
         const char *node = NULL;
 
         if(op == NULL) {
             continue;
 
         } else if(strcmp(op, "create") == 0) {
             match = change->children;
 
         } else if(strcmp(op, "move") == 0) {
             continue;
 
         } else if(strcmp(op, "delete") == 0) {
             continue;
 
         } else if(strcmp(op, "modify") == 0) {
             match = first_named_child(change, XML_DIFF_RESULT);
             if(match) {
                 match = match->children;
             }
         }
 
         if(match) {
             name = (const char *)match->name;
         }
 
         crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
         if(xpath == NULL) {
             /* Version field, ignore */
 
         } else if(name == NULL) {
             crm_debug("No result for %s operation to %s", op, xpath);
             CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0);
 
         } else if(strcmp(name, XML_TAG_CIB) == 0) {
             pcmk__xe_foreach_child(first_named_child(match, XML_CIB_TAG_STATUS),
                                    NULL, handle_op_for_node, NULL);
 
         } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
             pcmk__xe_foreach_child(match, NULL, handle_op_for_node, NULL);
 
         } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) {
             node = crm_element_value(match, XML_ATTR_UNAME);
             if (node == NULL) {
                 node = ID(match);
             }
             handle_rsc_op(match, (void *) node);
 
         } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) {
             node = ID(match);
             handle_rsc_op(match, (void *) node);
 
         } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) {
             char *local_node = pcmk__xpath_node_id(xpath, "lrm");
 
             handle_rsc_op(match, local_node);
             free(local_node);
 
         } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) {
             char *local_node = pcmk__xpath_node_id(xpath, "lrm");
 
             handle_rsc_op(match, local_node);
             free(local_node);
 
         } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) {
             char *local_node = pcmk__xpath_node_id(xpath, "lrm");
 
             handle_rsc_op(match, local_node);
             free(local_node);
 
         } else {
             crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
         }
     }
 }
 
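 /* Process a v1 (replace-style) CIB diff: every lrm_rsc_op element found in the
  * "diff added" portion is treated as a potential operation update.
  */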
 static void
 crm_diff_update_v1(const char *event, xmlNode * msg)
 {
     /* Process operation updates */
     xmlXPathObject *xpathObj = xpath_search(msg,
                                             "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
                                             "//" XML_LRM_TAG_RSC_OP);
     int lpc = 0, max = numXpathResults(xpathObj);
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *rsc_op = getXpathResult(xpathObj, lpc);
 
         handle_rsc_op(rsc_op, NULL);
     }
     freeXpathObject(xpathObj);
 }
 
 static void
 crm_diff_update(const char *event, xmlNode * msg)
 {
     int rc = -1;
     static bool stale = FALSE;
     gboolean cib_updated = FALSE;
     xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     out->progress(out, false);
 
     if (current_cib != NULL) {
         rc = xml_apply_patchset(current_cib, diff, TRUE);
 
         switch (rc) {
             case -pcmk_err_diff_resync:
             case -pcmk_err_diff_failed:
                 crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(current_cib); current_cib = NULL;
                 break;
             case pcmk_ok:
                 cib_updated = TRUE;
                 break;
             default:
                 crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(current_cib); current_cib = NULL;
         }
     }
 
     if (current_cib == NULL) {
         crm_trace("Re-requesting the full cib");
         cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
     }
 
     if (options.external_agent) {
         int format = 0;
         crm_element_value_int(diff, "format", &format);
         switch(format) {
             case 1:
                 crm_diff_update_v1(event, msg);
                 break;
             case 2:
                 crm_diff_update_v2(event, msg);
                 break;
             default:
                 crm_err("Unknown patch format: %d", format);
         }
     }
 
     if (current_cib == NULL) {
         if(!stale) {
             out->info(out, "--- Stale data ---");
         }
         stale = TRUE;
         return;
     }
 
     stale = FALSE;
     refresh_after_event(cib_updated, FALSE);
 }
 
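 /* Redraw the status display: refresh the pacemakerd state when connected natively,
  * reset the output destination if it is not stdout, and render the full cluster
  * status.  In nagios-style monitor mode, a render failure exits with
  * MON_STATUS_WARN; a schema validation failure exits with CRM_EX_CONFIG.
  */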
 static int
 mon_refresh_display(gpointer user_data)
 {
     int rc = pcmk_rc_ok;
 
     last_refresh = time(NULL);
 
     if (output_format == mon_output_none || output_format == mon_output_unset) {
         return G_SOURCE_REMOVE;
     }
 
     if (fence_history == pcmk__fence_history_full &&
         !pcmk_all_flags_set(show, pcmk_section_fencing_all) &&
         output_format != mon_output_xml) {
         fence_history = pcmk__fence_history_reduced;
     }
 
     // Get an up-to-date pacemakerd status for the cluster summary
     if (cib->variant == cib_native) {
         pcmk__pacemakerd_status(out, crm_system_name, options.reconnect_ms / 2,
                                 false, &pcmkd_state);
     }
 
     if (out->dest != stdout) {
         out->reset(out);
     }
 
     rc = pcmk__output_cluster_status(out, st, cib, current_cib, pcmkd_state,
                                      fence_history, show, show_opts,
                                      options.only_node, options.only_rsc,
                                      options.neg_location_prefix,
                                      output_format == mon_output_monitor);
 
     if (output_format == mon_output_monitor && rc != pcmk_rc_ok) {
         clean_up(MON_STATUS_WARN);
         return G_SOURCE_REMOVE;
     } else if (rc == pcmk_rc_schema_validation) {
         clean_up(CRM_EX_CONFIG);
         return G_SOURCE_REMOVE;
     }
 
     if (out->dest != stdout) {
         out->finish(out, CRM_EX_OK, true, NULL);
     }
 
     return G_SOURCE_CONTINUE;
 }
 
 /* This function is called for fencing events (see setup_fencer_connection() for
  * which ones) when --watch-fencing is used on the command line
  */
 static void
 mon_st_callback_event(stonith_t * st, stonith_event_t * e)
 {
     if (st->state == stonith_disconnected) {
         /* disconnect cib as well and have everything reconnect */
         mon_cib_connection_destroy(NULL);
     } else if (options.external_agent) {
         char *desc = stonith__event_description(e);
 
         send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
         free(desc);
     }
 }
 
 /* Cause the screen to be redrawn (via mainloop_set_trigger) when various conditions are met:
  *
  * - If the last update occurred more than reconnect_ms ago (defaults to 5s, but
  *   can be changed via the -i command line option), or
  * - After every 10 CIB updates, or
  * - If it's been 2s since the last update
  *
  * This function sounds like it would be more broadly useful, but it is only called when a
  * fencing event is received or a CIB diff occurs.
  */
 static void
 refresh_after_event(gboolean data_updated, gboolean enforce)
 {
     static int updates = 0;
     time_t now = time(NULL);
 
     if (data_updated) {
         updates++;
     }
 
     if(refresh_timer == NULL) {
         refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL);
     }
 
     if (reconnect_timer > 0) {
         /* we will receive a refresh request after successful reconnect */
         mainloop_timer_stop(refresh_timer);
         return;
     }
 
     /* Since an initial failure to connect to the fencer is not treated as fatal,
      * give it another try here.  We will not get here if a CIB reconnection is
      * already under way.
      */
     setup_fencer_connection();
 
     if (enforce ||
         ((now - last_refresh) > (options.reconnect_ms / 1000)) ||
         updates >= 10) {
         mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
         mainloop_timer_stop(refresh_timer);
         updates = 0;
 
     } else {
         mainloop_timer_start(refresh_timer);
     }
 }
 
 /* This function is called for fencing events (see setup_fencer_connection() for
  * which ones) when --watch-fencing is NOT used on the command line
  */
 static void
 mon_st_callback_display(stonith_t * st, stonith_event_t * e)
 {
     if (st->state == stonith_disconnected) {
         /* disconnect cib as well and have everything reconnect */
         mon_cib_connection_destroy(NULL);
     } else {
         out->progress(out, false);
         refresh_after_event(TRUE, FALSE);
     }
 }
 
 /*
  * De-init ncurses, disconnect from the CIB manager, disconnect fencing,
  * deallocate memory, and show the usage message if requested.
  *
  * We don't actually return, but nominally returning crm_exit_t allows a usage
  * like "return clean_up(exit_code);" which helps static analysis understand the
  * code flow.
  */
 static crm_exit_t
 clean_up(crm_exit_t exit_code)
 {
     /* Quitting crm_mon is much more complicated than it ought to be. */
 
     /* (1) Close connections, free things, etc. */
     cib__clean_up_connection(&cib);
     stonith_api_delete(st);
     free(options.neg_location_prefix);
     free(options.only_node);
     free(options.only_rsc);
     free(options.pid_file);
     g_slist_free_full(options.includes_excludes, free);
 
     g_strfreev(processed_args);
 
     /* (2) If this is abnormal termination and we're in curses mode, shut down
      * curses first.  Any messages displayed to the screen before curses is
      * shut down will be lost, because shutting it down also restores the
      * screen to whatever it looked like before crm_mon was started.
      */
     if ((error != NULL || exit_code == CRM_EX_USAGE) && output_format == mon_output_console) {
         out->finish(out, exit_code, false, NULL);
         pcmk__output_free(out);
         out = NULL;
     }
 
     /* (3) If this is a command-line usage failure, print the usage
      * message.
      */
     if (exit_code == CRM_EX_USAGE && (output_format == mon_output_console || output_format == mon_output_plain)) {
         char *help = g_option_context_get_help(context, TRUE, NULL);
 
         fprintf(stderr, "%s", help);
         g_free(help);
     }
 
     pcmk__free_arg_context(context);
 
     /* (4) If this is any kind of error, print the error out and exit.  Make
      * sure to handle situations both before and after formatted output is
      * set up.  We want errors to appear formatted if at all possible.
      */
     if (error != NULL) {
         if (out != NULL) {
             out->err(out, "%s: %s", g_get_prgname(), error->message);
             out->finish(out, exit_code, true, NULL);
             pcmk__output_free(out);
         } else {
             fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
         }
 
         g_clear_error(&error);
         crm_exit(exit_code);
     }
 
     /* (5) Print formatted output to the screen if we made it far enough in
      * crm_mon to be able to do so.
      */
     if (out != NULL) {
         if (!options.daemonize) {
             out->finish(out, exit_code, true, NULL);
         }
 
         pcmk__output_free(out);
         pcmk__unregister_formats();
     }
 
     crm_exit(exit_code);
 }
diff --git a/xml/api/crm_mon-2.29.rng b/xml/api/crm_mon-2.29.rng
new file mode 100644
index 0000000000..9cc554cf75
--- /dev/null
+++ b/xml/api/crm_mon-2.29.rng
@@ -0,0 +1,213 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+    <start>
+        <ref name="element-crm-mon"/>
+    </start>
+
+    <define name="element-crm-mon">
+        <choice>
+            <ref name="element-crm-mon-disconnected" />
+            <group>
+                <optional>
+                    <externalRef href="pacemakerd-health-2.25.rng" />
+                </optional>
+                <optional>
+                    <ref name="element-summary" />
+                </optional>
+                <optional>
+                    <ref name="nodes-list" />
+                </optional>
+                <optional>
+                    <ref name="resources-list" />
+                </optional>
+                <optional>
+                    <ref name="node-attributes-list" />
+                </optional>
+                <optional>
+                    <externalRef href="node-history-2.12.rng"/>
+                </optional>
+                <optional>
+                    <ref name="failures-list" />
+                </optional>
+                <optional>
+                    <ref name="fence-event-list" />
+                </optional>
+                <optional>
+                    <ref name="tickets-list" />
+                </optional>
+                <optional>
+                    <ref name="bans-list" />
+                </optional>
+            </group>
+        </choice>
+    </define>
+
+    <define name="element-crm-mon-disconnected">
+        <element name="crm-mon-disconnected">
+            <optional>
+                <attribute name="description"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="pacemakerd-state"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-summary">
+        <element name="summary">
+            <optional>
+                <element name="stack">
+                    <attribute name="type"> <text /> </attribute>
+                    <optional>
+                        <attribute name="pacemakerd-state">
+                            <text />
+                        </attribute>
+                    </optional>
+                </element>
+            </optional>
+            <optional>
+                <element name="current_dc">
+                    <attribute name="present"> <data type="boolean" /> </attribute>
+                    <optional>
+                        <group>
+                            <attribute name="version"> <text /> </attribute>
+                            <attribute name="name"> <text /> </attribute>
+                            <attribute name="id"> <text /> </attribute>
+                            <attribute name="with_quorum"> <data type="boolean" /> </attribute>
+                        </group>
+                    </optional>
+                    <optional>
+                        <attribute name="mixed_version"> <data type="boolean" /> </attribute>
+                    </optional>
+                </element>
+            </optional>
+            <optional>
+                <element name="last_update">
+                    <attribute name="time"> <text /> </attribute>
+                    <optional>
+                        <attribute name="origin"> <text /> </attribute>
+                    </optional>
+                </element>
+                <element name="last_change">
+                    <attribute name="time"> <text /> </attribute>
+                    <attribute name="user"> <text /> </attribute>
+                    <attribute name="client"> <text /> </attribute>
+                    <attribute name="origin"> <text /> </attribute>
+                </element>
+            </optional>
+            <optional>
+                <element name="nodes_configured">
+                    <attribute name="number"> <data type="nonNegativeInteger" /> </attribute>
+                </element>
+                <element name="resources_configured">
+                    <attribute name="number"> <data type="nonNegativeInteger" /> </attribute>
+                    <attribute name="disabled"> <data type="nonNegativeInteger" /> </attribute>
+                    <attribute name="blocked"> <data type="nonNegativeInteger" /> </attribute>
+                </element>
+            </optional>
+            <optional>
+                <element name="cluster_options">
+                    <attribute name="stonith-enabled"> <data type="boolean" /> </attribute>
+                    <attribute name="symmetric-cluster"> <data type="boolean" /> </attribute>
+                    <attribute name="no-quorum-policy"> <text /> </attribute>
+                    <attribute name="maintenance-mode"> <data type="boolean" /> </attribute>
+                    <attribute name="stop-all-resources"> <data type="boolean" /> </attribute>
+                    <attribute name="stonith-timeout-ms"> <data type="integer" /> </attribute>
+                    <attribute name="priority-fencing-delay-ms"> <data type="integer" /> </attribute>
+                </element>
+            </optional>
+        </element>
+    </define>
+
+    <define name="resources-list">
+        <element name="resources">
+            <zeroOrMore>
+                <externalRef href="resources-2.29.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="nodes-list">
+        <element name="nodes">
+            <zeroOrMore>
+                <externalRef href="nodes-2.29.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="node-attributes-list">
+        <element name="node_attributes">
+            <zeroOrMore>
+                <externalRef href="node-attrs-2.8.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="failures-list">
+        <element name="failures">
+            <zeroOrMore>
+                <externalRef href="failure-2.8.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="fence-event-list">
+        <element name="fence_history">
+            <optional>
+                <attribute name="status"> <data type="integer" /> </attribute>
+            </optional>
+            <zeroOrMore>
+                <externalRef href="fence-event-2.15.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="tickets-list">
+        <element name="tickets">
+            <zeroOrMore>
+                <ref name="element-ticket" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="bans-list">
+        <element name="bans">
+            <zeroOrMore>
+                <ref name="element-ban" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="element-ticket">
+        <element name="ticket">
+            <attribute name="id"> <text /> </attribute>
+            <attribute name="status">
+                <choice>
+                    <value>granted</value>
+                    <value>revoked</value>
+                </choice>
+            </attribute>
+            <attribute name="standby"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="last-granted"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
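+
+    <!-- A hypothetical instance accepted by element-ticket above (the ticket
+         id is an assumption, for illustration only):
+         <ticket id="ticketA" status="granted" standby="false"/>
+      -->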
+
+    <define name="element-ban">
+        <element name="ban">
+            <attribute name="id"> <text /> </attribute>
+            <attribute name="resource"> <text /> </attribute>
+            <attribute name="node"> <text /> </attribute>
+            <attribute name="weight"> <data type="integer" /> </attribute>
+            <attribute name="promoted-only"> <data type="boolean" /> </attribute>
+            <!-- DEPRECATED: master_only is a duplicate of promoted-only that is
+                 provided solely for API backward compatibility. It will be
+                 removed in a future release. Check promoted-only instead.
+              -->
+            <attribute name="master_only"> <data type="boolean" /> </attribute>
+        </element>
+    </define>
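+
+    <!-- A hypothetical instance accepted by element-ban (the resource and
+         node names are assumptions, for illustration only):
+         <ban id="ban-dummy-on-node1" resource="dummy" node="node1"
+              weight="-1000000" promoted-only="false" master_only="false"/>
+      -->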
+</grammar>
diff --git a/xml/api/crm_resource-2.29.rng b/xml/api/crm_resource-2.29.rng
new file mode 100644
index 0000000000..d95fd56a87
--- /dev/null
+++ b/xml/api/crm_resource-2.29.rng
@@ -0,0 +1,288 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+    <start>
+        <ref name="element-crm-resource"/>
+    </start>
+
+    <define name="element-crm-resource">
+        <choice>
+            <ref name="agents-list" />
+            <ref name="alternatives-list" />
+            <ref name="constraints-list" />
+            <externalRef href="generic-list-2.4.rng"/>
+            <element name="metadata"> <text/> </element>
+            <ref name="locate-list" />
+            <ref name="operations-list" />
+            <ref name="providers-list" />
+            <ref name="reasons-list" />
+            <ref name="resource-check" />
+            <ref name="resource-config" />
+            <ref name="resources-list" />
+            <ref name="resource-agent-action" />
+        </choice>
+    </define>
+
+    <define name="agents-list">
+        <element name="agents">
+            <attribute name="standard"> <text/> </attribute>
+            <optional>
+                <attribute name="provider"> <text/> </attribute>
+            </optional>
+            <zeroOrMore>
+                <element name="agent"> <text/> </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="alternatives-list">
+        <element name="providers">
+            <attribute name="for"> <text/> </attribute>
+            <zeroOrMore>
+                <element name="provider"> <text/> </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="constraints-list">
+        <element name="constraints">
+            <interleave>
+                <zeroOrMore>
+                    <ref name="rsc-location" />
+                </zeroOrMore>
+                <zeroOrMore>
+                    <ref name="rsc-colocation" />
+                </zeroOrMore>
+            </interleave>
+        </element>
+    </define>
+
+    <define name="locate-list">
+        <element name="nodes">
+            <attribute name="resource"> <text/> </attribute>
+            <zeroOrMore>
+                <element name="node">
+                    <optional>
+                        <attribute name="state"><value>promoted</value></attribute>
+                    </optional>
+                    <text/>
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="rsc-location">
+        <element name="rsc_location">
+            <attribute name="node"> <text/> </attribute>
+            <attribute name="rsc"> <text/> </attribute>
+            <attribute name="id"> <text/> </attribute>
+            <externalRef href="../score.rng"/>
+        </element>
+    </define>
+
+    <define name="operations-list">
+        <element name="operations">
+            <oneOrMore>
+                <ref name="element-operation-list" />
+            </oneOrMore>
+        </element>
+    </define>
+
+    <define name="providers-list">
+        <element name="providers">
+            <attribute name="standard"> <value>ocf</value> </attribute>
+            <optional>
+                <attribute name="agent"> <text/> </attribute>
+            </optional>
+            <zeroOrMore>
+                <element name="provider"> <text/> </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="reasons-list">
+        <element name="reason">
+            <!-- set only when resource and node are both specified -->
+            <optional>
+                <attribute name="running_on"> <text/> </attribute>
+            </optional>
+
+            <!-- set only when just a resource, without a node, is specified -->
+            <optional>
+                <attribute name="running"> <data type="boolean"/> </attribute>
+            </optional>
+
+            <choice>
+                <ref name="reasons-with-no-resource"/>
+                <ref name="resource-check"/>
+            </choice>
+        </element>
+    </define>
+
+    <define name="reasons-with-no-resource">
+        <element name="resources">
+            <zeroOrMore>
+                <element name="resource">
+                    <attribute name="id"> <text/> </attribute>
+                    <attribute name="running"> <data type="boolean"/> </attribute>
+                    <optional>
+                        <attribute name="host"> <text/> </attribute>
+                    </optional>
+                    <ref name="resource-check"/>
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="resource-config">
+        <element name="resource_config">
+            <externalRef href="resources-2.29.rng" />
+            <element name="xml"> <text/> </element>
+        </element>
+    </define>
+
+    <define name="resource-check">
+        <element name="check">
+            <attribute name="id"> <text/> </attribute>
+            <optional>
+                <choice>
+                    <attribute name="remain_stopped"><value>true</value></attribute>
+                    <attribute name="promotable"><value>false</value></attribute>
+                </choice>
+            </optional>
+            <optional>
+                <attribute name="unmanaged"><value>true</value></attribute>
+            </optional>
+            <optional>
+                <attribute name="locked-to"> <text/> </attribute>
+            </optional>
+            <optional>
+                <attribute name="unhealthy"><value>true</value></attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="resources-list">
+        <element name="resources">
+            <zeroOrMore>
+                <externalRef href="resources-2.29.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="rsc-colocation">
+        <element name="rsc_colocation">
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="rsc"> <text/> </attribute>
+            <attribute name="with-rsc"> <text/> </attribute>
+            <externalRef href="../score.rng"/>
+            <optional>
+                <attribute name="node-attribute"> <text/> </attribute>
+            </optional>
+            <optional>
+                <attribute name="rsc-role">
+                    <ref name="attribute-roles"/>
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="with-rsc-role">
+                    <ref name="attribute-roles"/>
+                </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-operation-list">
+        <element name="operation">
+            <optional>
+                <group>
+                    <attribute name="rsc"> <text/> </attribute>
+                    <attribute name="agent"> <text/> </attribute>
+                </group>
+            </optional>
+            <attribute name="op"> <text/> </attribute>
+            <attribute name="node"> <text/> </attribute>
+            <attribute name="call"> <data type="integer" /> </attribute>
+            <attribute name="rc"> <data type="nonNegativeInteger" /> </attribute>
+            <optional>
+                <attribute name="last-rc-change"> <text/> </attribute>
+                <attribute name="exec-time"> <data type="nonNegativeInteger" /> </attribute>
+            </optional>
+            <attribute name="status"> <text/> </attribute>
+        </element>
+    </define>
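+
+    <!-- A hypothetical instance accepted by element-operation-list (the
+         resource, agent, and node names are assumptions, for illustration
+         only):
+         <operation rsc="dummy" agent="ocf:pacemaker:Dummy" op="monitor"
+                    node="node1" call="11" rc="0" status="complete"/>
+      -->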
+
+    <define name="resource-agent-action">
+        <element name="resource-agent-action">
+            <attribute name="action"> <text/> </attribute>
+            <optional>
+                <attribute name="rsc"> <text/> </attribute>
+            </optional>
+            <attribute name="class"> <text/> </attribute>
+            <attribute name="type"> <text/> </attribute>
+            <optional>
+                <attribute name="provider"> <text/> </attribute>
+            </optional>
+            <optional>
+                <ref name="overrides-list"/>
+            </optional>
+            <ref name="agent-status"/>
+            <optional>
+                <element name="command">
+                    <choice>
+                        <text />
+                        <externalRef href="subprocess-output-2.23.rng"/>
+                    </choice>
+                </element>
+            </optional>
+        </element>
+    </define>
+
+    <define name="overrides-list">
+        <element name="overrides">
+            <zeroOrMore>
+                <element name="override">
+                    <optional>
+                        <attribute name="rsc"> <text/> </attribute>
+                    </optional>
+                    <attribute name="name"> <text/> </attribute>
+                    <attribute name="value"> <text/> </attribute>
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="agent-status">
+        <element name="agent-status">
+            <attribute name="code"> <data type="integer" /> </attribute>
+            <optional>
+                <attribute name="message"> <text/> </attribute>
+            </optional>
+            <optional>
+                <attribute name="execution_code"> <data type="integer" /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="execution_message"> <text/> </attribute>
+            </optional>
+            <optional>
+                <attribute name="reason"> <text/> </attribute>
+            </optional>
+        </element>
+    </define>
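+
+    <!-- A hypothetical instance accepted by agent-status (the codes and
+         messages are illustrative only):
+         <agent-status code="0" message="ok" execution_code="0"
+                       execution_message="complete"/>
+      -->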
+
+    <define name="attribute-roles">
+        <choice>
+            <value>Stopped</value>
+            <value>Started</value>
+            <value>Promoted</value>
+            <value>Unpromoted</value>
+
+            <!-- These synonyms for Promoted/Unpromoted are allowed for
+                 backward compatibility with output from older Pacemaker
+                 versions that used them -->
+            <value>Master</value>
+            <value>Slave</value>
+        </choice>
+    </define>
+</grammar>
diff --git a/xml/api/crm_simulate-2.29.rng b/xml/api/crm_simulate-2.29.rng
new file mode 100644
index 0000000000..48cf942f67
--- /dev/null
+++ b/xml/api/crm_simulate-2.29.rng
@@ -0,0 +1,338 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+    <start>
+        <ref name="element-crm-simulate"/>
+    </start>
+
+    <define name="element-crm-simulate">
+        <choice>
+            <ref name="timings-list" />
+            <group>
+                <ref name="cluster-status" />
+                <optional>
+                    <ref name="modifications-list" />
+                </optional>
+                <optional>
+                    <ref name="allocations-utilizations-list" />
+                </optional>
+                <optional>
+                    <ref name="action-list" />
+                </optional>
+                <optional>
+                    <ref name="cluster-injected-actions-list" />
+                    <ref name="revised-cluster-status" />
+                </optional>
+            </group>
+        </choice>
+    </define>
+
+    <define name="allocations-utilizations-list">
+        <choice>
+            <element name="allocations">
+                <zeroOrMore>
+                    <choice>
+                        <ref name="element-allocation" />
+                        <ref name="element-promotion" />
+                    </choice>
+                </zeroOrMore>
+            </element>
+            <element name="utilizations">
+                <zeroOrMore>
+                    <choice>
+                        <ref name="element-capacity" />
+                        <ref name="element-utilization" />
+                    </choice>
+                </zeroOrMore>
+            </element>
+            <element name="allocations_utilizations">
+                <zeroOrMore>
+                    <choice>
+                        <ref name="element-allocation" />
+                        <ref name="element-promotion" />
+                        <ref name="element-capacity" />
+                        <ref name="element-utilization" />
+                    </choice>
+                </zeroOrMore>
+            </element>
+        </choice>
+    </define>
+
+    <define name="cluster-status">
+        <element name="cluster_status">
+            <ref name="nodes-list" />
+            <ref name="resources-list" />
+            <optional>
+                <ref name="node-attributes-list" />
+            </optional>
+            <optional>
+                <externalRef href="node-history-2.12.rng" />
+            </optional>
+            <optional>
+                <ref name="failures-list" />
+            </optional>
+        </element>
+    </define>
+
+    <define name="modifications-list">
+        <element name="modifications">
+            <optional>
+                <attribute name="quorum"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="watchdog"> <text /> </attribute>
+            </optional>
+            <zeroOrMore>
+                <ref name="element-inject-modify-node" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-inject-modify-ticket" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-inject-spec" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-inject-attr" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="revised-cluster-status">
+        <element name="revised_cluster_status">
+            <ref name="nodes-list" />
+            <ref name="resources-list" />
+            <optional>
+                <ref name="node-attributes-list" />
+            </optional>
+            <optional>
+                <ref name="failures-list" />
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-inject-attr">
+        <element name="inject_attr">
+            <attribute name="cib_node"> <text /> </attribute>
+            <attribute name="name"> <text /> </attribute>
+            <attribute name="node_path"> <text /> </attribute>
+            <attribute name="value"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="element-inject-modify-node">
+        <element name="modify_node">
+            <attribute name="action"> <text /> </attribute>
+            <attribute name="node"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="element-inject-spec">
+        <element name="inject_spec">
+            <attribute name="spec"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="element-inject-modify-ticket">
+        <element name="modify_ticket">
+            <attribute name="action"> <text /> </attribute>
+            <attribute name="ticket"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="cluster-injected-actions-list">
+        <element name="transition">
+            <zeroOrMore>
+                <ref name="element-injected-actions" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="node-attributes-list">
+        <element name="node_attributes">
+            <zeroOrMore>
+                <externalRef href="node-attrs-2.8.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="failures-list">
+        <element name="failures">
+            <zeroOrMore>
+                <externalRef href="failure-2.8.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="nodes-list">
+        <element name="nodes">
+            <zeroOrMore>
+                <externalRef href="nodes-2.29.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="resources-list">
+        <element name="resources">
+            <zeroOrMore>
+                <externalRef href="resources-2.29.rng" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="timings-list">
+        <element name="timings">
+            <zeroOrMore>
+                <ref name="element-timing" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="action-list">
+        <element name="actions">
+            <zeroOrMore>
+                <ref name="element-node-action" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-rsc-action" />
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="element-allocation">
+        <element name="node_weight">
+            <attribute name="function"> <text /> </attribute>
+            <attribute name="node"> <text /> </attribute>
+            <externalRef href="../score.rng" />
+            <optional>
+                <attribute name="id"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-capacity">
+        <element name="capacity">
+            <attribute name="comment"> <text /> </attribute>
+            <attribute name="node"> <text /> </attribute>
+            <zeroOrMore>
+                <element>
+                    <anyName />
+                    <text />
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="element-inject-cluster-action">
+        <element name="cluster_action">
+            <attribute name="node"> <text /> </attribute>
+            <attribute name="task"> <text /> </attribute>
+            <optional>
+                <attribute name="id"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-injected-actions">
+        <choice>
+            <ref name="element-inject-cluster-action" />
+            <ref name="element-inject-fencing-action" />
+            <ref name="element-inject-pseudo-action" />
+            <ref name="element-inject-rsc-action" />
+        </choice>
+    </define>
+
+    <define name="element-inject-fencing-action">
+        <element name="fencing_action">
+            <attribute name="op"> <text /> </attribute>
+            <attribute name="target"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="element-node-action">
+        <element name="node_action">
+            <attribute name="node"> <text /> </attribute>
+            <attribute name="reason"> <text /> </attribute>
+            <attribute name="task"> <text /> </attribute>
+        </element>
+    </define>
+
+    <define name="element-promotion">
+        <element name="promotion_score">
+            <attribute name="id"> <text /> </attribute>
+            <externalRef href="../score.rng" />
+            <optional>
+                <attribute name="node"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-inject-pseudo-action">
+        <element name="pseudo_action">
+            <attribute name="task"> <text /> </attribute>
+            <optional>
+                <attribute name="node"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-inject-rsc-action">
+        <element name="rsc_action">
+            <attribute name="node"> <text /> </attribute>
+            <attribute name="op"> <text /> </attribute>
+            <attribute name="resource"> <text /> </attribute>
+            <optional>
+                <attribute name="interval"> <data type="integer" /> </attribute>
+            </optional>
+        </element>
+    </define>
+
+    <define name="element-timing">
+        <element name="timing">
+            <attribute name="file"> <text /> </attribute>
+            <attribute name="duration"> <data type="double" /> </attribute>
+        </element>
+    </define>
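+
+    <!-- A hypothetical instance accepted by element-timing (the file name is
+         an assumption, for illustration only):
+         <timing file="some-transition.xml" duration="0.05"/>
+      -->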
+
+    <define name="element-rsc-action">
+        <element name="rsc_action">
+            <attribute name="action"> <text /> </attribute>
+            <attribute name="resource"> <text /> </attribute>
+            <optional>
+                <attribute name="blocked"> <data type="boolean" /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="dest"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="next-role"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="node"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="reason"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="role"> <text /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="source"> <text /> </attribute>
+            </optional>
+        </element>
+    </define>
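+
+    <!-- A hypothetical instance accepted by element-rsc-action (the resource
+         and node names are assumptions, for illustration only):
+         <rsc_action action="start" resource="dummy" node="node1"/>
+      -->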
+
+    <define name="element-utilization">
+        <element name="utilization">
+            <attribute name="function"> <text /> </attribute>
+            <attribute name="node"> <text /> </attribute>
+            <attribute name="resource"> <text /> </attribute>
+            <zeroOrMore>
+                <element>
+                    <anyName />
+                    <text />
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+</grammar>
diff --git a/xml/api/nodes-2.29.rng b/xml/api/nodes-2.29.rng
new file mode 100644
index 0000000000..7dd1798914
--- /dev/null
+++ b/xml/api/nodes-2.29.rng
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+    <start>
+        <ref name="element-full-node"/>
+    </start>
+
+    <define name="element-full-node">
+        <element name="node">
+            <attribute name="name"> <text/> </attribute>
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="online"> <data type="boolean" /> </attribute>
+            <attribute name="standby"> <data type="boolean" /> </attribute>
+            <attribute name="standby_onfail"> <data type="boolean" /> </attribute>
+            <attribute name="maintenance"> <data type="boolean" /> </attribute>
+            <attribute name="pending"> <data type="boolean" /> </attribute>
+            <attribute name="unclean"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="health">
+                    <choice>
+                        <value>red</value>
+                        <value>yellow</value>
+                        <value>green</value>
+                    </choice>
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="feature_set"> <text/> </attribute>
+            </optional>
+            <attribute name="shutdown"> <data type="boolean" /> </attribute>
+            <attribute name="expected_up"> <data type="boolean" /> </attribute>
+            <attribute name="is_dc"> <data type="boolean" /> </attribute>
+            <attribute name="resources_running"> <data type="nonNegativeInteger" /> </attribute>
+            <attribute name="type">
+                <choice>
+                    <value>unknown</value>
+                    <value>member</value>
+                    <value>remote</value>
+                    <value>ping</value>
+                </choice>
+            </attribute>
+            <optional>
+                <!-- for virtualized pacemaker_remote nodes, crm_mon 1.1.13 uses
+                     "container_id" while later versions use "id_as_resource" -->
+                <choice>
+                    <attribute name="container_id"> <text/> </attribute>
+                    <attribute name="id_as_resource"> <text/> </attribute>
+                </choice>
+            </optional>
+            <externalRef href="resources-2.29.rng" />
+        </element>
+    </define>
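+
+    <!-- A hypothetical instance accepted by element-full-node (the node name,
+         id, and counts are assumptions, for illustration only; the nested
+         resource list may be empty):
+         <node name="node1" id="1" online="true" standby="false"
+               standby_onfail="false" maintenance="false" pending="false"
+               unclean="false" shutdown="false" expected_up="true" is_dc="true"
+               resources_running="0" type="member"/>
+      -->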
+</grammar>
diff --git a/xml/api/resources-2.29.rng b/xml/api/resources-2.29.rng
new file mode 100644
index 0000000000..f4214a7c64
--- /dev/null
+++ b/xml/api/resources-2.29.rng
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+    <start>
+        <ref name="element-resource-list"/>
+    </start>
+
+    <define name="element-resource-list">
+        <interleave>
+            <zeroOrMore>
+                <ref name="element-bundle" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-clone" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-group" />
+            </zeroOrMore>
+            <zeroOrMore>
+                <ref name="element-resource" />
+            </zeroOrMore>
+        </interleave>
+    </define>
+
+    <define name="element-bundle">
+        <element name="bundle">
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="type">
+                <choice>
+                    <value>docker</value>
+                    <value>rkt</value>
+                    <value>podman</value>
+                </choice>
+            </attribute>
+            <attribute name="image"> <text/> </attribute>
+            <attribute name="unique"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="maintenance">
+                    <data type="boolean" />
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="description">
+                    <text/>
+                </attribute>
+            </optional>
+            <attribute name="managed"> <data type="boolean" /> </attribute>
+            <attribute name="failed"> <data type="boolean" /> </attribute>
+            <zeroOrMore>
+                <element name="replica">
+                    <attribute name="id"> <data type="nonNegativeInteger" /> </attribute>
+                    <zeroOrMore>
+                        <ref name="element-resource" />
+                    </zeroOrMore>
+                </element>
+            </zeroOrMore>
+        </element>
+    </define>
+
+    <define name="element-clone">
+        <element name="clone">
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="multi_state"> <data type="boolean" /> </attribute>
+            <attribute name="unique"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="maintenance">
+                    <data type="boolean" />
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="description">
+                    <text/>
+                </attribute>
+            </optional>
+            <attribute name="managed"> <data type="boolean" /> </attribute>
+            <attribute name="disabled"> <data type="boolean" /> </attribute>
+            <attribute name="failed"> <data type="boolean" /> </attribute>
+            <attribute name="failure_ignored"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="target_role"> <text/> </attribute>
+            </optional>
+            <ref name="element-resource-list" />
+        </element>
+    </define>
+
+    <define name="element-group">
+        <element name="group">
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="number_resources"> <data type="nonNegativeInteger" /> </attribute>
+            <optional>
+                <attribute name="maintenance">
+                    <data type="boolean" />
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="description">
+                    <text/>
+                </attribute>
+            </optional>
+            <attribute name="managed"> <data type="boolean" /> </attribute>
+            <attribute name="disabled"> <data type="boolean" /> </attribute>
+            <ref name="element-resource-list" />
+        </element>
+    </define>
+
+    <define name="element-resource">
+        <element name="resource">
+            <attribute name="id"> <text/> </attribute>
+            <attribute name="resource_agent"> <text/> </attribute>
+            <attribute name="role"> <text/> </attribute>
+            <optional>
+                <attribute name="target_role"> <text/> </attribute>
+            </optional>
+            <attribute name="active"> <data type="boolean" /> </attribute>
+            <attribute name="orphaned"> <data type="boolean" /> </attribute>
+            <optional>
+                <attribute name="blocked"> <data type="boolean" /> </attribute>
+            </optional>
+            <optional>
+                <attribute name="maintenance">
+                    <data type="boolean" />
+                </attribute>
+            </optional>
+            <optional>
+                <attribute name="description">
+                    <text/>
+                </attribute>
+            </optional>
+            <attribute name="failed"> <data type="boolean" /> </attribute>
+            <attribute name="managed"> <data type="boolean" /> </attribute>
+            <attribute name="failure_ignored"> <data type="boolean" /> </attribute>
+            <attribute name="nodes_running_on"> <data type="nonNegativeInteger" />  </attribute>
+            <optional>
+                <attribute name="pending"> <text/> </attribute>
+            </optional>
+            <optional>
+                <attribute name="locked_to"> <text/> </attribute>
+            </optional>
+            <zeroOrMore>
+                <element name="node">
+                    <attribute name="name"> <text/> </attribute>
+                    <attribute name="id"> <text/> </attribute>
+                    <attribute name="cached"> <data type="boolean" /> </attribute>
+                </element>
+            </zeroOrMore>
+            <optional>
+                <element name="xml"> <text/> </element>
+            </optional>
+        </element>
+    </define>
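+
+    <!-- A hypothetical instance accepted by element-resource (the resource
+         and node names are assumptions, for illustration only):
+         <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started"
+                   active="true" orphaned="false" failed="false" managed="true"
+                   failure_ignored="false" nodes_running_on="1">
+             <node name="node1" id="1" cached="true"/>
+         </resource>
+      -->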
+</grammar>