diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index e9f36ad2b7..cf7d28ec69 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3118 +1,3118 @@
 =#=#=#= Begin test: Basic text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
 =#=#=#= End test: Basic text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output
 =#=#=#= Begin test: XML output =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output
 =#=#=#= Begin test: Basic text output without node section =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
 =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output without node section
 =#=#=#= Begin test: XML output without the node section =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --exclude=nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output without the node section
 =#=#=#= Begin test: Text output with only the node section =#=#=#=
 Node List:
   * Online: [ cluster01 cluster02 ]
 =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output with only the node section
 =#=#=#= Begin test: Complete text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output
 =#=#=#= Begin test: Complete text output with detail =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf::pacemaker:ping):	 Started cluster02
     * ping	(ocf::pacemaker:ping):	 Started cluster01
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
     * Resource Group: mysql-group:2:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:3:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
 =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output with detail
 =#=#=#= Begin test: Complete brief text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
-  *  1	(ocf::pacemaker:Dummy):	Active cluster02
-  *  1	(stonith:fence_xvm):	Active cluster01
+  * 1	(ocf::pacemaker:Dummy):	Active cluster02
+  * 1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Resource Group: exim-group:
-    *  1/1	(lsb:exim):	Active cluster02
-    *  1/1	(ocf::heartbeat:IPaddr):	Active cluster02
+    * 1/1	(lsb:exim):	Active cluster02
+    * 1/1	(ocf::heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output
 =#=#=#= Begin test: Complete text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * ping	(ocf::pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
   * Node cluster02: online:
     * Resources:
       * ping	(ocf::pacemaker:ping):	 Started
       * dummy	(ocf::pacemaker:Dummy):	 Started
       * Public-IP	(ocf::heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
   * GuestNode httpd-bundle-0@: OFFLINE:
     * Resources:
   * GuestNode httpd-bundle-1@: OFFLINE:
     * Resources:
   * GuestNode httpd-bundle-2@: OFFLINE:
     * Resources:
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output grouped by node
 =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
-      *  1	(lsb:mysql-proxy):	Active 
-      *  1	(ocf::pacemaker:ping):	Active 
-      *  1	(stonith:fence_xvm):	Active 
+      * 1	(lsb:mysql-proxy):	Active 
+      * 1	(ocf::pacemaker:ping):	Active 
+      * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
-      *  1	(lsb:exim):	Active 
-      *  1	(lsb:mysql-proxy):	Active 
-      *  1	(ocf::heartbeat:IPaddr):	Active 
-      *  1	(ocf::pacemaker:Dummy):	Active 
-      *  1	(ocf::pacemaker:ping):	Active 
+      * 1	(lsb:exim):	Active 
+      * 1	(lsb:mysql-proxy):	Active 
+      * 1	(ocf::heartbeat:IPaddr):	Active 
+      * 1	(ocf::pacemaker:Dummy):	Active 
+      * 1	(ocf::pacemaker:ping):	Active 
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node
 =#=#=#= Begin test: XML output grouped by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --group-by-node">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </node>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output grouped by node
 =#=#=#= Begin test: Complete text output filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by node
 =#=#=#= Begin test: XML output filtered by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as xml --include=all --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node
 =#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by tag
 =#=#=#= Begin test: XML output filtered by tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --node=even-nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </clone>
     <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by tag
 =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by resource tag
 =#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource tag
 =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by node that doesn't exist
 =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes/>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by node that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf::pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf::pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by node
 =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (19) monitor: interval="60000ms"
 =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by primitive resource
 =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Fencing">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by primitive resource
 =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource
 =#=#=#= Begin test: XML output filtered by group resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=exim-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource
 =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by group resource member
 =#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" managed="true" disabled="false">
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by group resource member
 =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource
 =#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping-clone">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource
 =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by clone resource instance
 =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by clone resource instance
 =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf::pacemaker:ping):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output filtered by exact clone resource instance
 =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by exact clone resource instance
 =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output filtered by resource that doesn't exist
 =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources/>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by resource that doesn't exist
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf::pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by tag
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped
     * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle resource
 =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by inactive bundle resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf::heartbeat:IPaddr2):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled IP address resource
 =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled IP address resource
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[1]
       * httpd-bundle-docker-1	(ocf::heartbeat:docker):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled container
 =#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="2">
         <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled container
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-0	(ocf::pacemaker:remote):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundle connection
 =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundle connection
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd	(ocf::heartbeat:apache):	 Stopped
     * Replica[1]
       * httpd	(ocf::heartbeat:apache):	 Stopped
     * Replica[2]
       * httpd	(ocf::heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - Basic text output with inactive resources, filtered by bundled primitive resource
 =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="1">
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
       <replica id="2">
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output filtered by bundled primitive resource
 =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
     * Resource Group: mysql-group:2:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:3:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by clone name in cloned group
 =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by clone name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
     * Resource Group: mysql-group:2:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:3:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by group name in cloned group
 =#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by group name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
     * Resource Group: mysql-group:2:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:3:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
     * Resource Group: mysql-group:4:
       * mysql-proxy	(lsb:mysql-proxy):	 Stopped
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by primitive name in cloned group
 =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by primitive name in cloned group
 =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 (1) cluster02 (2) ]
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete text output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="27" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: Text output of partially active resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
     * dummy-1	(ocf::pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources
 =#=#=#= Begin test: XML output of partially active resources =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="13" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <group id="partially-active-group" number_resources="2" managed="true" disabled="false">
       <resource id="dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon        - XML output of partially active resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
     * dummy-1	(ocf::pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources
 =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
 
 Full List of Resources:
-  *  1/1	(stonith:fence_xvm):	Active cluster01
+  * 1/1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
-    *  1/2	(ocf::pacemaker:Dummy):	Active cluster02
+    * 1/2	(ocf::pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01:
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output, with inactive resources
 =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
-      *  1	(ocf::heartbeat:IPaddr2):	Active 
-      *  1	(ocf::heartbeat:docker):	Active 
-      *  1	(ocf::pacemaker:ping):	Active 
-      *  1	(ocf::pacemaker:remote):	Active 
-      *  1	(stonith:fence_xvm):	Active 
+      * 1	(ocf::heartbeat:IPaddr2):	Active 
+      * 1	(ocf::heartbeat:docker):	Active 
+      * 1	(ocf::pacemaker:ping):	Active 
+      * 1	(ocf::pacemaker:remote):	Active 
+      * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
-      *  1	(ocf::heartbeat:IPaddr2):	Active 
-      *  1	(ocf::heartbeat:docker):	Active 
-      *  1	(ocf::pacemaker:Dummy):	Active 
-      *  1	(ocf::pacemaker:remote):	Active 
+      * 1	(ocf::heartbeat:IPaddr2):	Active 
+      * 1	(ocf::heartbeat:docker):	Active 
+      * 1	(ocf::pacemaker:Dummy):	Active 
+      * 1	(ocf::pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0@cluster02: online:
     * Resources:
-      *  1	(ocf::heartbeat:apache):	Active 
+      * 1	(ocf::heartbeat:apache):	Active 
 
 Inactive Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
     * Stopped: [ cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Started cluster02
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped cluster01
   * Resource Group: partially-active-group:
-    *  1/2	(ocf::pacemaker:Dummy):	Active cluster02
+    * 1/2	(ocf::pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01:
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon        - Complete brief text output grouped by node, with inactive resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 13 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped cluster01
 =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" with_quorum="true"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="13" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon        - XML output of partially active resources, filtered by node
 =#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 27 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (unmanaged):
     * ping	(ocf::pacemaker:ping):	 Started cluster02 (unmanaged)
     * ping	(ocf::pacemaker:ping):	 Started cluster01 (unmanaged)
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (unmanaged)
   * dummy	(ocf::pacemaker:Dummy):	 Started cluster02 (unmanaged)
   * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (unmanaged) (disabled):
     * inactive-dummy-1	(ocf::pacemaker:Dummy):	 Stopped (disabled, unmanaged)
     * inactive-dummy-2	(ocf::pacemaker:Dummy):	 Stopped (disabled, unmanaged)
   * Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
     * httpd-bundle-0 (192.168.122.131)	(ocf::heartbeat:apache):	 Stopped (unmanaged)
     * httpd-bundle-1 (192.168.122.132)	(ocf::heartbeat:apache):	 Stopped (unmanaged)
     * httpd-bundle-2 (192.168.122.133)	(ocf::heartbeat:apache):	 Stopped (unmanaged)
   * Resource Group: exim-group (unmanaged):
     * Public-IP	(ocf::heartbeat:IPaddr):	 Started cluster02 (unmanaged)
     * Email	(lsb:exim):	 Started cluster02 (unmanaged)
   * Clone Set: mysql-clone-group [mysql-group] (unmanaged):
     * Resource Group: mysql-group:0 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (unmanaged)
     * Resource Group: mysql-group:1 (unmanaged):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (unmanaged)
 =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon        - Text output of all resources with maintenance-mode enabled
diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h
index 391ab72a68..dfb1d640a7 100644
--- a/include/crm/fencing/internal.h
+++ b/include/crm/fencing/internal.h
@@ -1,210 +1,201 @@
 /*
  * Copyright 2011-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef STONITH_NG_INTERNAL__H
 #  define STONITH_NG_INTERNAL__H
 
 #  include <glib.h>
 #  include <crm/common/ipc.h>
 #  include <crm/common/xml.h>
 #  include <crm/common/output_internal.h>
 #  include <crm/stonith-ng.h>
 
 enum st_device_flags
 {
     st_device_supports_list   = 0x0001,
     st_device_supports_status = 0x0002,
     st_device_supports_reboot = 0x0004,
     st_device_supports_parameter_plug = 0x0008,
     st_device_supports_parameter_port = 0x0010,
 };
 
 #define stonith__set_device_flags(device_flags, device_id, flags_to_set) do { \
         device_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,      \
                                           "Fence device", device_id,          \
                                           (device_flags), (flags_to_set),     \
                                           #flags_to_set);                     \
     } while (0)
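
 /* Illustrative usage sketch (the variable and device names below are
  * hypothetical, not part of this API). The macro assigns the updated flag
  * word back to its first argument and traces the change:
  *
  *     uint32_t flags = st_device_supports_list;
  *
  *     stonith__set_device_flags(flags, "fence_dev1",
  *                               st_device_supports_status
  *                               |st_device_supports_reboot);
  *     // flags is now 0x0007 (list, status and reboot all set)
  */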
 
 #define stonith__set_call_options(st_call_opts, call_for, flags_to_set) do { \
         st_call_opts = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                           "Fencer call", (call_for),         \
                                           (st_call_opts), (flags_to_set),    \
                                           #flags_to_set);                    \
     } while (0)
 
 #define stonith__clear_call_options(st_call_opts, call_for, flags_to_clear) do { \
         st_call_opts = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                             "Fencer call", (call_for),         \
                                             (st_call_opts), (flags_to_clear),  \
                                             #flags_to_clear);                  \
     } while (0)
 
 struct stonith_action_s;
 typedef struct stonith_action_s stonith_action_t;
 
 stonith_action_t *stonith_action_create(const char *agent,
                                         const char *_action,
                                         const char *victim,
                                         uint32_t victim_nodeid,
                                         int timeout,
                                         GHashTable * device_args,
                                         GHashTable * port_map,
                                         const char * host_arg);
 void stonith__destroy_action(stonith_action_t *action);
 void stonith__action_result(stonith_action_t *action, int *rc, char **output,
                             char **error_output);
 
 int
 stonith_action_execute_async(stonith_action_t * action,
                              void *userdata,
                              void (*done) (GPid pid, int rc, const char *output,
                                            gpointer user_data),
                              void (*fork_cb) (GPid pid, gpointer user_data));
 
 int stonith__execute(stonith_action_t *action);
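
 /* A minimal synchronous sketch using only the declarations above; the
  * agent, action and target names are assumptions, and a zero return is
  * assumed to indicate success (error and memory handling elided):
  *
  *     stonith_action_t *a = stonith_action_create("fence_xvm", "reboot",
  *                                                 "node1", 0, 120,
  *                                                 NULL, NULL, NULL);
  *     int rc = 0;
  *     char *output = NULL;
  *     char *error_output = NULL;
  *
  *     if (stonith__execute(a) == 0) {
  *         stonith__action_result(a, &rc, &output, &error_output);
  *     }
  *     stonith__destroy_action(a);
  */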
 
 xmlNode *create_level_registration_xml(const char *node, const char *pattern,
                                        const char *attr, const char *value,
                                        int level,
                                        stonith_key_value_t *device_list);
 
 xmlNode *create_device_registration_xml(const char *id,
                                         enum stonith_namespace namespace,
                                         const char *agent,
                                         stonith_key_value_t *params,
                                         const char *rsc_provides);
 
 void stonith__register_messages(pcmk__output_t *out);
 
 GList *stonith__parse_targets(const char *hosts);
 
 gboolean stonith__later_succeeded(stonith_history_t *event, stonith_history_t *top_history);
 stonith_history_t *stonith__sort_history(stonith_history_t *history);
 
 void stonith__device_parameter_flags(uint32_t *device_flags,
                                      const char *device_name,
                                      xmlNode *metadata);
 
 #  define ST_LEVEL_MAX 10
 
 #  define F_STONITH_CLIENTID      "st_clientid"
 #  define F_STONITH_CALLOPTS      "st_callopt"
 #  define F_STONITH_CALLID        "st_callid"
 #  define F_STONITH_CALLDATA      "st_calldata"
 #  define F_STONITH_OPERATION     "st_op"
 #  define F_STONITH_TARGET        "st_target"
 #  define F_STONITH_REMOTE_OP_ID  "st_remote_op"
 #  define F_STONITH_REMOTE_OP_ID_RELAY  "st_remote_op_relay"
 #  define F_STONITH_RC            "st_rc"
 /*! Timeout period per device execution */
 #  define F_STONITH_TIMEOUT       "st_timeout"
 #  define F_STONITH_TOLERANCE     "st_tolerance"
 #  define F_STONITH_DELAY         "st_delay"
 /*! Action-specific timeout period returned in query of fencing devices. */
 #  define F_STONITH_ACTION_TIMEOUT       "st_action_timeout"
 /*! Host in query result is not allowed to run this action */
 #  define F_STONITH_ACTION_DISALLOWED     "st_action_disallowed"
 /*! Maximum random fencing delay for a device */
 #  define F_STONITH_DELAY_MAX            "st_delay_max"
 /*! Base delay used when computing a fencing delay */
 #  define F_STONITH_DELAY_BASE           "st_delay_base"
 /*! Has this device been verified using a monitor-type
  *  operation (monitor, list, status)? */
 #  define F_STONITH_DEVICE_VERIFIED   "st_monitor_verified"
 /*! Device is required for this action */
 #  define F_STONITH_DEVICE_REQUIRED   "st_required"
 /*! Number of available devices in query result */
 #  define F_STONITH_AVAILABLE_DEVICES "st-available-devices"
 #  define F_STONITH_CALLBACK_TOKEN    "st_async_id"
 #  define F_STONITH_CLIENTNAME        "st_clientname"
 #  define F_STONITH_CLIENTNODE        "st_clientnode"
 #  define F_STONITH_NOTIFY_ACTIVATE   "st_notify_activate"
 #  define F_STONITH_NOTIFY_DEACTIVATE "st_notify_deactivate"
 #  define F_STONITH_DELEGATE      "st_delegate"
 /*! The node initiating the stonith operation. If an operation
  * is relayed, this is the last node the operation lands on. When
  * in standalone mode, origin is the ID of the client that originated
  * the operation. */
 #  define F_STONITH_ORIGIN        "st_origin"
 #  define F_STONITH_HISTORY_LIST  "st_history"
 #  define F_STONITH_DATE          "st_date"
 #  define F_STONITH_STATE         "st_state"
 #  define F_STONITH_ACTIVE        "st_active"
 #  define F_STONITH_DIFFERENTIAL  "st_differential"
 
 #  define F_STONITH_DEVICE        "st_device_id"
 #  define F_STONITH_ACTION        "st_device_action"
 #  define F_STONITH_MODE          "st_mode"
 #  define F_STONITH_MERGED        "st_op_merged"
 
 #  define T_STONITH_NG        "stonith-ng"
 #  define T_STONITH_REPLY     "st-reply"
 /*! For async operations, the server returns to the client an event
  * containing the total amount of time it is allowing for the operation
  * to take place. */
 #  define T_STONITH_TIMEOUT_VALUE "st-async-timeout-value"
 #  define T_STONITH_NOTIFY    "st_notify"
 
 #  define STONITH_ATTR_ACTION_OP   "action"
 
 #  define STONITH_OP_EXEC        "st_execute"
 #  define STONITH_OP_TIMEOUT_UPDATE        "st_timeout_update"
 #  define STONITH_OP_QUERY       "st_query"
 #  define STONITH_OP_FENCE       "st_fence"
 #  define STONITH_OP_RELAY       "st_relay"
 #  define STONITH_OP_DEVICE_ADD      "st_device_register"
 #  define STONITH_OP_DEVICE_DEL      "st_device_remove"
 #  define STONITH_OP_FENCE_HISTORY   "st_fence_history"
 #  define STONITH_OP_LEVEL_ADD       "st_level_add"
 #  define STONITH_OP_LEVEL_DEL       "st_level_remove"
 
 #  define STONITH_WATCHDOG_AGENT  "#watchdog"
 
 #  ifdef HAVE_STONITH_STONITH_H
 // utilities from st_lha.c
 int stonith__list_lha_agents(stonith_key_value_t **devices);
 int stonith__lha_metadata(const char *agent, int timeout, char **output);
 bool stonith__agent_is_lha(const char *agent);
 int stonith__lha_validate(stonith_t *st, int call_options, const char *target,
                           const char *agent, GHashTable *params,
                           int timeout, char **output, char **error_output);
 #  endif
 
 // utilities from st_rhcs.c
 int stonith__list_rhcs_agents(stonith_key_value_t **devices);
 int stonith__rhcs_metadata(const char *agent, int timeout, char **output);
 bool stonith__agent_is_rhcs(const char *agent);
 int stonith__rhcs_validate(stonith_t *st, int call_options, const char *target,
                            const char *agent, GHashTable *params, const char *host_arg,
                            int timeout, char **output, char **error_output);
 
+/* Exported for crm_mon to reference */
 int stonith__failed_history(pcmk__output_t *out, va_list args);
 int stonith__history(pcmk__output_t *out, va_list args);
 int stonith__full_history(pcmk__output_t *out, va_list args);
-int stonith__full_history_xml(pcmk__output_t *out, va_list args);
-int stonith__last_fenced_html(pcmk__output_t *out, va_list args);
-int stonith__last_fenced_text(pcmk__output_t *out, va_list args);
-int stonith__last_fenced_xml(pcmk__output_t *out, va_list args);
 int stonith__pending_actions(pcmk__output_t *out, va_list args);
-int stonith__event_html(pcmk__output_t *out, va_list args);
-int stonith__event_text(pcmk__output_t *out, va_list args);
-int stonith__event_xml(pcmk__output_t *out, va_list args);
-int stonith__validate_agent_html(pcmk__output_t *out, va_list args);
-int stonith__validate_agent_text(pcmk__output_t *out, va_list args);
-int stonith__validate_agent_xml(pcmk__output_t *out, va_list args);
 
 stonith_history_t *stonith__first_matching_event(stonith_history_t *history,
                                                  bool (*matching_fn)(stonith_history_t *, void *),
                                                  void *user_data);
 bool stonith__event_state_pending(stonith_history_t *history, void *user_data);
 bool stonith__event_state_eq(stonith_history_t *history, void *user_data);
 bool stonith__event_state_neq(stonith_history_t *history, void *user_data);
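
 /* e.g., to locate the first failed event in a history list (st_failed is
  * assumed to be the op_state value from <crm/stonith-ng.h>):
  *
  *     stonith_history_t *failed =
  *         stonith__first_matching_event(history, stonith__event_state_eq,
  *                                       GINT_TO_POINTER(st_failed));
  */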
 
 #endif
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index c4b28cc216..89e17b86aa 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,623 +1,597 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 #  include <string.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/output_internal.h>
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) do {           \
         was_processing_error = TRUE;    \
         pcmk__config_err(fmt);          \
     } while (0)
 
 #  define pe_warn(fmt...) do {          \
         was_processing_warning = TRUE;  \
         pcmk__config_warn(fmt);         \
     } while (0)
 
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 
 #define pe__set_working_set_flags(working_set, flags_to_set) do {           \
         (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_set), #flags_to_set);           \
     } while (0)
 
 #define pe__clear_working_set_flags(working_set, flags_to_clear) do {       \
         (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,     \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_clear), #flags_to_clear);       \
     } while (0)
 
 #define pe__set_resource_flags(resource, flags_to_set) do {                 \
         (resource)->flags = pcmk__set_flags_as(__func__, __LINE__,          \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_set), #flags_to_set);                                 \
     } while (0)
 
 #define pe__clear_resource_flags(resource, flags_to_clear) do {             \
         (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__,        \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_clear), #flags_to_clear);                             \
     } while (0)
 
 #define pe__set_action_flags(action, flags_to_set) do {                     \
         (action)->flags = pcmk__set_flags_as(__func__, __LINE__,            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags(action, flags_to_clear) do {                 \
         (action)->flags = pcmk__clear_flags_as(__func__, __LINE__,          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
         action_flags = pcmk__set_flags_as(__func__, __LINE__,               \
                                           LOG_TRACE, "Action", action_name, \
                                           (action_flags),                   \
                                           (flags_to_set), #flags_to_set);   \
     } while (0)
 
 #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
         action_flags = pcmk__clear_flags_as(__func__, __LINE__,             \
                                             LOG_TRACE,                      \
                                             "Action", action_name,          \
                                             (action_flags),                 \
                                             (flags_to_clear),               \
                                             #flags_to_clear);               \
     } while (0)
 
 #define pe__set_action_flags_as(function, line, action, flags_to_set) do {  \
         (action)->flags = pcmk__set_flags_as((function), (line),            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
         (action)->flags = pcmk__clear_flags_as((function), (line),          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_order_flags(order_flags, flags_to_set) do {                 \
         order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                          "Ordering", "constraint",          \
                                          order_flags, (flags_to_set),       \
                                          #flags_to_set);                    \
     } while (0)
 
 #define pe__clear_order_flags(order_flags, flags_to_clear) do {               \
         order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Ordering", "constraint",          \
                                            order_flags, (flags_to_clear),     \
                                            #flags_to_clear);                  \
     } while (0)
 
 #define pe__set_graph_flags(graph_flags, gr_action, flags_to_set) do {      \
         graph_flags = pcmk__set_flags_as(__func__, __LINE__,                \
                                          LOG_TRACE, "Graph",                \
                                          (gr_action)->uuid, graph_flags,    \
                                          (flags_to_set), #flags_to_set);    \
     } while (0)
 
 #define pe__clear_graph_flags(graph_flags, gr_action, flags_to_clear) do {     \
         graph_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                            LOG_TRACE, "Graph",                 \
                                            (gr_action)->uuid, graph_flags,     \
                                            (flags_to_clear), #flags_to_clear); \
     } while (0)
 
 // Some warnings we don't want to print on every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = 0x0001,
     pe_wo_restart_type  = 0x0002,
     pe_wo_role_after    = 0x0004,
     pe_wo_poweroff      = 0x0008,
     pe_wo_require_all   = 0x0010,
     pe_wo_order_score   = 0x0020,
     pe_wo_neg_threshold = 0x0040,
 };
 
 extern uint32_t pe_wo;
 
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (!pcmk_is_set(pe_wo, pe_wo_bit)) {  \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,       \
                                       "Warn-once", "logging", pe_wo,        \
                                       (pe_wo_bit), #pe_wo_bit);             \
         }                                       \
     } while (0);
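
 /* Illustrative only: each pe_wo_* bit gates its warning so it is logged
  * at most once per process, e.g.
  *
  *     pe_warn_once(pe_wo_neg_threshold,
  *                  "Negative fail-count thresholds are deprecated");
  */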
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GListPtr node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
     int id;
     enum pe_ordering type;
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 typedef struct notify_data_s {
     GSList *keys;               // Environment variable name/value pairs
 
     const char *action;
 
     pe_action_t *pre;
     pe_action_t *post;
     pe_action_t *pre_done;
     pe_action_t *post_done;
 
     GListPtr active;            /* notify_entry_t*  */
     GListPtr inactive;          /* notify_entry_t*  */
     GListPtr start;             /* notify_entry_t*  */
     GListPtr stop;              /* notify_entry_t*  */
     GListPtr demote;            /* notify_entry_t*  */
     GListPtr promote;           /* notify_entry_t*  */
     GListPtr master;            /* notify_entry_t*  */
     GListPtr slave;             /* notify_entry_t*  */
     GHashTable *allowed_nodes;
 
 } notify_data_t;
 
 bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node);
 
 int pe__add_scores(int score1, int score2);
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(void);
 void verify_pe_options(GHashTable * options);
 
 void common_update_score(pe_resource_t * rsc, const char *id, int score);
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
                                    long options, const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
                          , size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 static inline const char *
 pe__rsc_bool_str(pe_resource_t *rsc, uint64_t rsc_flag)
 {
     return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
 }
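
 /* e.g., pe__rsc_bool_str(rsc, pe_rsc_managed) yields "true" or "false"
  * (pe_rsc_managed is one of the pe_rsc_* flag bits from pe_types.h) */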
 
-int pe__ban_html(pcmk__output_t *out, va_list args);
-int pe__ban_text(pcmk__output_t *out, va_list args);
-int pe__ban_xml(pcmk__output_t *out, va_list args);
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_html(pcmk__output_t *out, va_list args);
 int pe__clone_text(pcmk__output_t *out, va_list args);
-int pe__cluster_counts_html(pcmk__output_t *out, va_list args);
-int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
-int pe__cluster_counts_xml(pcmk__output_t *out, va_list args);
-int pe__cluster_dc_html(pcmk__output_t *out, va_list args);
-int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
-int pe__cluster_dc_xml(pcmk__output_t *out, va_list args);
-int pe__cluster_maint_mode_html(pcmk__output_t *out, va_list args);
-int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
-int pe__cluster_options_html(pcmk__output_t *out, va_list args);
-int pe__cluster_options_log(pcmk__output_t *out, va_list args);
-int pe__cluster_options_text(pcmk__output_t *out, va_list args);
-int pe__cluster_options_xml(pcmk__output_t *out, va_list args);
-int pe__cluster_stack_html(pcmk__output_t *out, va_list args);
-int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
-int pe__cluster_stack_xml(pcmk__output_t *out, va_list args);
-int pe__cluster_summary(pcmk__output_t *out, va_list args);
-int pe__cluster_summary_html(pcmk__output_t *out, va_list args);
-int pe__cluster_times_html(pcmk__output_t *out, va_list args);
-int pe__cluster_times_xml(pcmk__output_t *out, va_list args);
-int pe__cluster_times_text(pcmk__output_t *out, va_list args);
-int pe__failed_action_text(pcmk__output_t *out, va_list args);
-int pe__failed_action_xml(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_html(pcmk__output_t *out, va_list args);
 int pe__group_text(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
-int pe__node_and_op(pcmk__output_t *out, va_list args);
-int pe__node_and_op_xml(pcmk__output_t *out, va_list args);
-int pe__node_attribute_html(pcmk__output_t *out, va_list args);
+int pe__resource_xml(pcmk__output_t *out, va_list args);
+int pe__resource_html(pcmk__output_t *out, va_list args);
+int pe__resource_text(pcmk__output_t *out, va_list args);
+
+/* Exported for crm_mon to reference */
+int pe__ban_text(pcmk__output_t *out, va_list args);
+int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
+int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
+int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
+int pe__cluster_options_text(pcmk__output_t *out, va_list args);
+int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
+int pe__cluster_summary(pcmk__output_t *out, va_list args);
+int pe__cluster_times_text(pcmk__output_t *out, va_list args);
+int pe__failed_action_text(pcmk__output_t *out, va_list args);
 int pe__node_attribute_text(pcmk__output_t *out, va_list args);
-int pe__node_attribute_xml(pcmk__output_t *out, va_list args);
-int pe__node_list_html(pcmk__output_t *out, va_list args);
 int pe__node_list_text(pcmk__output_t *out, va_list args);
-int pe__node_list_xml(pcmk__output_t *out, va_list args);
 int pe__op_history_text(pcmk__output_t *out, va_list args);
-int pe__op_history_xml(pcmk__output_t *out, va_list args);
-int pe__resource_config(pcmk__output_t *out, va_list args);
 int pe__resource_history_text(pcmk__output_t *out, va_list args);
-int pe__resource_history_xml(pcmk__output_t *out, va_list args);
-int pe__resource_xml(pcmk__output_t *out, va_list args);
-int pe__resource_html(pcmk__output_t *out, va_list args);
-int pe__resource_text(pcmk__output_t *out, va_list args);
-int pe__resource_list(pcmk__output_t *out, va_list args);
-int pe__ticket_html(pcmk__output_t *out, va_list args);
 int pe__ticket_text(pcmk__output_t *out, va_list args);
-int pe__ticket_xml(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent,
                        pe_working_set_t * data_set);
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = 0x00,
     pe_fc_effective = 0x01, // don't count expired failures
     pe_fc_fillers   = 0x02, // if container, include filler failures in count
 };
 
 int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure,
                      uint32_t flags, xmlNode *xml_op,
                      pe_working_set_t *data_set);
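
 /* A hedged sketch: count only unexpired failures of rsc on node, also
  * counting failures of any bundle fillers (node, rsc and data_set are
  * assumed to come from the caller's unpacked working set):
  *
  *     time_t last_failure = 0;
  *     int failcount = pe_get_failcount(node, rsc, &last_failure,
  *                                      pe_fc_effective|pe_fc_fillers,
  *                                      NULL, data_set);
  */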
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
                               unsigned int *count_all,
                               unsigned int *count_clean);
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return pe__find_active_on(rsc, NULL, NULL);
 }
 
 
 /* Binary-like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(GList *list);
 
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 /* Printing functions for debugging */
 extern void print_node(const char *pre_text, pe_node_t * node, gboolean details);
 extern void print_str_str(gpointer key, gpointer value, gpointer user_data);
 extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out);
 
 extern void dump_node_capacity(int level, const char *comment, pe_node_t * node);
 extern void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node);
 
 void pe__show_node_weights_as(const char *file, const char *function,
                               int line, bool to_log, pe_resource_t *rsc,
                               const char *comment, GHashTable *nodes);
 
 #define pe__show_node_weights(level, rsc, text, nodes)              \
         pe__show_node_weights_as(__FILE__, __func__, __LINE__,      \
                                  (level), (rsc), (text), (nodes))
 
 /* Sorting functions */
 extern gint sort_rsc_priority(gconstpointer a, gconstpointer b);
 extern gint sort_rsc_index(gconstpointer a, gconstpointer b);
 
 extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key);
 
 extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node,
                                   gboolean optional, gboolean foo, pe_working_set_t * data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, data_set);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, data_set);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, data_set);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, data_set)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, data_set)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, data_set)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, data_set)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, data_set)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, data_set)
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 extern pe_action_t *find_first_action(GListPtr input, const char *uuid, const char *task,
                                       pe_node_t * on_node);
 extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name,
                                           gboolean allow_non_atomic);
 
 extern GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 extern GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
                               pe_working_set_t * data_set);
 
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role);
 
 extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id,
                                           pe_working_set_t * data_set);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return FALSE;
 }
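 
 /* For example (hypothetical IDs): a clone instance with id "galera:2"
  * compares equal to the base name "galera", because pe_base_name_end()
  * stops just before the ":2" clone suffix.
  */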
 
 int pe__target_rc_from_xml(xmlNode *xml_op);
 
 gint sort_node_uname(gconstpointer a, gconstpointer b);
 bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
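 // Parameter digests cached for one resource action, with comparison result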
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
                                          const char *key, pe_node_t *node,
                                          xmlNode *xml_op, GHashTable *overrides,
                                          bool calc_secure,
                                          pe_working_set_t *data_set);
 
 void pe__free_digests(gpointer ptr);
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
                                          pe_working_set_t * data_set);
 
 pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set);
 void trigger_unfencing(
     pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set);
 
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
 
 #define pe_action_required(action, reason, text)    \
     pe_action_set_flag_reason(__func__, __LINE__, action, reason, text, \
                               pe_action_optional, FALSE)
 #define pe_action_implies(action, reason, flag)     \
     pe_action_set_flag_reason(__func__, __LINE__, action, reason, NULL, \
                               flag, FALSE)
 
 void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
 bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml,
                                        const char *field);
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);
 const char *pe_node_attribute_raw(pe_node_t *node, const char *name);
 bool pe__is_universal_clone(pe_resource_t *rsc,
                             pe_working_set_t *data_set);
 void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
                          enum pe_check_parameters, pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
                                         enum pe_check_parameters,
                                         pe_working_set_t*));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
                                 pe_rule_eval_data_t *rule_data, GHashTable *hash,
                                 const char *always_first, gboolean overwrite,
                                 pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GListPtr pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
 GListPtr pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list);
 GListPtr pe__filter_rsc_list(GListPtr rscs, GListPtr filter);
 
 bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node);
 
 gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 gboolean pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
 
 #endif
diff --git a/lib/fencing/st_output.c b/lib/fencing/st_output.c
index 04f4b831c9..f48fd550a5 100644
--- a/lib/fencing/st_output.c
+++ b/lib/fencing/st_output.c
@@ -1,467 +1,460 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <stdarg.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/common/iso8601.h>
 #include <crm/common/util.h>
 #include <crm/common/xml.h>
 #include <crm/common/output_internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/fencing/internal.h>
 #include <crm/pengine/internal.h>
 
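 // Render an epoch time as a newly allocated date/time string (caller frees)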
 static char *
 time_t_string(time_t when) {
     crm_time_t *crm_when = crm_time_new(NULL);
     char *buf = NULL;
 
     crm_time_set_timet(crm_when, &when);
     buf = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     crm_time_free(crm_when);
     return buf;
 }
 
-PCMK__OUTPUT_ARGS("failed-fencing-history", "stonith_history_t *", "GListPtr", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("failed-fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean")
 int
 stonith__failed_history(pcmk__output_t *out, va_list args) {
     stonith_history_t *history = va_arg(args, stonith_history_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean print_spacer = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
     for (stonith_history_t *hp = history; hp; hp = hp->next) {
         if (hp->state != st_failed) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_node, hp->target)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Fencing Actions");
         out->message(out, "stonith-event", hp, full_history, stonith__later_succeeded(hp, history));
         out->increment_list(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("fencing-history", "stonith_history_t *", "GListPtr", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean")
 int
 stonith__history(pcmk__output_t *out, va_list args) {
     stonith_history_t *history = va_arg(args, stonith_history_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean print_spacer = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
     for (stonith_history_t *hp = history; hp; hp = hp->next) {
         if (!pcmk__str_in_list(only_node, hp->target)) {
             continue;
         }
 
         if (hp->state != st_failed) {
             PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Fencing History");
             out->message(out, "stonith-event", hp, full_history, stonith__later_succeeded(hp, history));
             out->increment_list(out);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GListPtr", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("full-fencing-list", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean")
 int
 stonith__full_history(pcmk__output_t *out, va_list args) {
     crm_exit_t history_rc G_GNUC_UNUSED = va_arg(args, crm_exit_t);
     stonith_history_t *history = va_arg(args, stonith_history_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean print_spacer = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
     for (stonith_history_t *hp = history; hp; hp = hp->next) {
         if (!pcmk__str_in_list(only_node, hp->target)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Fencing History");
         out->message(out, "stonith-event", hp, full_history, stonith__later_succeeded(hp, history));
         out->increment_list(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("full-fencing-history", "crm_exit_t", "stonith_history_t *", "GListPtr", "gboolean", "gboolean")
-int
-stonith__full_history_xml(pcmk__output_t *out, va_list args) {
+PCMK__OUTPUT_ARGS("full-fencing-list", "crm_exit_t", "stonith_history_t *", "GList *", "gboolean", "gboolean")
+static int
+full_history_xml(pcmk__output_t *out, va_list args) {
     crm_exit_t history_rc = va_arg(args, crm_exit_t);
     stonith_history_t *history = va_arg(args, stonith_history_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean print_spacer G_GNUC_UNUSED = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
     if (history_rc == 0) {
         for (stonith_history_t *hp = history; hp; hp = hp->next) {
             if (!pcmk__str_in_list(only_node, hp->target)) {
                 continue;
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Fencing History");
             out->message(out, "stonith-event", hp, full_history, stonith__later_succeeded(hp, history));
             out->increment_list(out);
         }
 
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
     } else {
         char *rc_s = crm_itoa(history_rc);
 
         pcmk__output_create_xml_node(out, "fence_history",
                                      "status", rc_s,
                                      NULL);
         free(rc_s);
 
         rc = pcmk_rc_ok;
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t")
-int
-stonith__last_fenced_html(pcmk__output_t *out, va_list args) {
+static int
+last_fenced_html(pcmk__output_t *out, va_list args) {
     const char *target = va_arg(args, const char *);
     time_t when = va_arg(args, time_t);
 
     if (when) {
         char *buf = crm_strdup_printf("Node %s last fenced at: %s", target, ctime(&when));
         pcmk__output_create_html_node(out, "div", NULL, NULL, buf);
         free(buf);
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t")
-int
-stonith__last_fenced_text(pcmk__output_t *out, va_list args) {
+static int
+last_fenced_text(pcmk__output_t *out, va_list args) {
     const char *target = va_arg(args, const char *);
     time_t when = va_arg(args, time_t);
 
     if (when) {
         pcmk__indented_printf(out, "Node %s last fenced at: %s", target, ctime(&when));
     } else {
         pcmk__indented_printf(out, "Node %s has never been fenced\n", target);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("last-fenced", "const char *", "time_t")
-int
-stonith__last_fenced_xml(pcmk__output_t *out, va_list args) {
+static int
+last_fenced_xml(pcmk__output_t *out, va_list args) {
     const char *target = va_arg(args, const char *);
     time_t when = va_arg(args, time_t);
 
     if (when) {
         char *buf = time_t_string(when);
 
         pcmk__output_create_xml_node(out, "last-fenced",
                                      "target", target,
                                      "when", buf,
                                      NULL);
 
         free(buf);
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
-PCMK__OUTPUT_ARGS("pending-fencing-actions", "stonith_history_t *", "GListPtr", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("pending-fencing-list", "stonith_history_t *", "GList *", "gboolean", "gboolean")
 int
 stonith__pending_actions(pcmk__output_t *out, va_list args) {
     stonith_history_t *history = va_arg(args, stonith_history_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean print_spacer = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
     for (stonith_history_t *hp = history; hp; hp = hp->next) {
         if (!pcmk__str_in_list(only_node, hp->target)) {
             continue;
         }
 
         /* Skip the rest of the history after we see a failed/done action */
         if ((hp->state == st_failed) || (hp->state == st_done)) {
             break;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Pending Fencing Actions");
         out->message(out, "stonith-event", hp, full_history, stonith__later_succeeded(hp, history));
         out->increment_list(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
-int
-stonith__event_html(pcmk__output_t *out, va_list args) {
+static int
+stonith_event_html(pcmk__output_t *out, va_list args) {
     stonith_history_t *event = va_arg(args, stonith_history_t *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean later_succeeded = va_arg(args, gboolean);
 
     switch(event->state) {
         case st_done: {
             char *completed_s = time_t_string(event->completed);
 
             out->list_item(out, "successful-stonith-event",
                            "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'",
                            stonith_action_str(event->action), event->target,
                            event->delegate ? event->delegate : "",
                            event->client, event->origin,
                            full_history ? "completed" : "last-successful",
                            completed_s);
             free(completed_s);
             break;
         }
 
         case st_failed: {
             char *failed_s = time_t_string(event->completed);
 
             out->list_item(out, "failed-stonith-event",
                            "%s of %s failed: delegate=%s, client=%s, origin=%s, %s='%s' %s",
                            stonith_action_str(event->action), event->target,
                            event->delegate ? event->delegate : "",
                            event->client, event->origin,
                            full_history ? "completed" : "last-failed",
                            failed_s,
                            later_succeeded ? "(a later attempt succeeded)" : "");
             free(failed_s);
             break;
         }
 
         default:
             out->list_item(out, "pending-stonith-event",
                            "%s of %s pending: client=%s, origin=%s",
                            stonith_action_str(event->action), event->target,
                            event->client, event->origin);
             break;
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
-int
-stonith__event_text(pcmk__output_t *out, va_list args) {
+static int
+stonith_event_text(pcmk__output_t *out, va_list args) {
     stonith_history_t *event = va_arg(args, stonith_history_t *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean later_succeeded = va_arg(args, gboolean);
 
     char *buf = time_t_string(event->completed);
 
     switch (event->state) {
         case st_failed:
             pcmk__indented_printf(out, "%s of %s failed: delegate=%s, client=%s, origin=%s, %s='%s' %s\n",
                                   stonith_action_str(event->action), event->target,
                                   event->delegate ? event->delegate : "",
                                   event->client, event->origin,
                                   full_history ? "completed" : "last-failed", buf,
                                   later_succeeded ? "(a later attempt succeeded)" : "");
             break;
 
         case st_done:
             pcmk__indented_printf(out, "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'\n",
                                   stonith_action_str(event->action), event->target,
                                   event->delegate ? event->delegate : "",
                                   event->client, event->origin,
                                   full_history ? "completed" : "last-successful", buf);
             break;
 
         default:
             pcmk__indented_printf(out, "%s of %s pending: client=%s, origin=%s\n",
                                   stonith_action_str(event->action), event->target,
                                   event->client, event->origin);
             break;
     }
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
-int
-stonith__event_xml(pcmk__output_t *out, va_list args) {
+static int
+stonith_event_xml(pcmk__output_t *out, va_list args) {
     stonith_history_t *event = va_arg(args, stonith_history_t *);
     gboolean full_history G_GNUC_UNUSED = va_arg(args, gboolean);
     gboolean later_succeeded G_GNUC_UNUSED = va_arg(args, gboolean);
 
     char *buf = NULL;
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "fence_event",
                                                    "action", event->action,
                                                    "target", event->target,
                                                    "client", event->client,
                                                    "origin", event->origin,
                                                    NULL);
 
     switch (event->state) {
         case st_failed:
             crm_xml_add(node, "status", "failed");
             break;
 
         case st_done:
             crm_xml_add(node, "status", "success");
             break;
 
         default: {
             char *state = crm_itoa(event->state);
             pcmk__xe_set_props(node, "status", "pending",
                                "extended-status", state,
                                NULL);
             free(state);
             break;
         }
     }
 
     if (event->delegate != NULL) {
         crm_xml_add(node, "delegate", event->delegate);
     }
 
     if (event->state == st_failed || event->state == st_done) {
         buf = time_t_string(event->completed);
         crm_xml_add(node, "completed", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int")
-int
-stonith__validate_agent_html(pcmk__output_t *out, va_list args) {
+static int
+validate_agent_html(pcmk__output_t *out, va_list args) {
     const char *agent = va_arg(args, const char *);
     const char *device = va_arg(args, const char *);
     char *output = va_arg(args, char *);
     char *error_output = va_arg(args, char *);
     int rc = va_arg(args, int);
 
     if (device) {
         char *buf = crm_strdup_printf("Validation of %s on %s %s", agent, device,
                                       rc ? "failed" : "succeeded");
         pcmk__output_create_html_node(out, "div", NULL, NULL, buf);
         free(buf);
     } else {
         char *buf = crm_strdup_printf("Validation of %s %s", agent,
                                       rc ? "failed" : "succeeded");
         pcmk__output_create_html_node(out, "div", NULL, NULL, buf);
         free(buf);
     }
 
     out->subprocess_output(out, rc, output, error_output);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int")
-int
-stonith__validate_agent_text(pcmk__output_t *out, va_list args) {
+static int
+validate_agent_text(pcmk__output_t *out, va_list args) {
     const char *agent = va_arg(args, const char *);
     const char *device = va_arg(args, const char *);
     char *output = va_arg(args, char *);
     char *error_output = va_arg(args, char *);
     int rc = va_arg(args, int);
 
     if (device) {
         pcmk__indented_printf(out, "Validation of %s on %s %s\n", agent, device,
                               rc ? "failed" : "succeeded");
     } else {
         pcmk__indented_printf(out, "Validation of %s %s\n", agent,
                               rc ? "failed" : "succeeded");
     }
 
-    if (output) {
-        puts(output);
-    }
-
-    if (error_output) {
-        puts(error_output);
-    }
-
+    out->subprocess_output(out, rc, output, error_output);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("validate", "const char *", "const char *", "char *", "char *", "int")
-int
-stonith__validate_agent_xml(pcmk__output_t *out, va_list args) {
+static int
+validate_agent_xml(pcmk__output_t *out, va_list args) {
     const char *agent = va_arg(args, const char *);
     const char *device = va_arg(args, const char *);
     char *output = va_arg(args, char *);
     char *error_output = va_arg(args, char *);
     int rc = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "validate",
                                                    "agent", agent,
                                                    "valid", pcmk__btoa(rc),
                                                    NULL);
 
     if (device != NULL) {
         crm_xml_add(node, "device", device);
     }
 
     pcmk__output_xml_push_parent(out, node);
     out->subprocess_output(out, rc, output, error_output);
     pcmk__output_xml_pop_parent(out);
 
     return rc;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
-    { "failed-fencing-history", "default", stonith__failed_history },
-    { "fencing-history", "default", stonith__history },
-    { "full-fencing-history", "default", stonith__full_history },
-    { "full-fencing-history", "xml", stonith__full_history_xml },
-    { "last-fenced", "html", stonith__last_fenced_html },
-    { "last-fenced", "log", stonith__last_fenced_text },
-    { "last-fenced", "text", stonith__last_fenced_text },
-    { "last-fenced", "xml", stonith__last_fenced_xml },
-    { "pending-fencing-actions", "default", stonith__pending_actions },
-    { "stonith-event", "html", stonith__event_html },
-    { "stonith-event", "log", stonith__event_text },
-    { "stonith-event", "text", stonith__event_text },
-    { "stonith-event", "xml", stonith__event_xml },
-    { "validate", "html", stonith__validate_agent_html },
-    { "validate", "log", stonith__validate_agent_text },
-    { "validate", "text", stonith__validate_agent_text },
-    { "validate", "xml", stonith__validate_agent_xml },
+    { "failed-fencing-list", "default", stonith__failed_history },
+    { "fencing-list", "default", stonith__history },
+    { "full-fencing-list", "default", stonith__full_history },
+    { "full-fencing-list", "xml", full_history_xml },
+    { "last-fenced", "html", last_fenced_html },
+    { "last-fenced", "log", last_fenced_text },
+    { "last-fenced", "text", last_fenced_text },
+    { "last-fenced", "xml", last_fenced_xml },
+    { "pending-fencing-list", "default", stonith__pending_actions },
+    { "stonith-event", "html", stonith_event_html },
+    { "stonith-event", "log", stonith_event_text },
+    { "stonith-event", "text", stonith_event_text },
+    { "stonith-event", "xml", stonith_event_xml },
+    { "validate", "html", validate_agent_html },
+    { "validate", "log", validate_agent_text },
+    { "validate", "text", validate_agent_text },
+    { "validate", "xml", validate_agent_xml },
 
     { NULL, NULL, NULL }
 };
 
 void
 stonith__register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
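 
 /* Usage sketch (assuming an initialized pcmk__output_t *out): once the table
  * above is registered, a handler is chosen by message name plus the output
  * object's format, e.g.:
  *
  *     stonith__register_messages(out);
  *     out->message(out, "last-fenced", "node1", (time_t) 0);
  *
  * "node1" is a hypothetical target; a zero time_t reports that the node has
  * never been fenced (text format only).
  */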
diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
index a637031916..1f5a25b283 100644
--- a/lib/pacemaker/pcmk_output.c
+++ b/lib/pacemaker/pcmk_output.c
@@ -1,549 +1,550 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/common/results.h>
 #include <crm/common/output_internal.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <libxml/tree.h>
 #include <pacemaker-internal.h>
 
 pcmk__supported_format_t pcmk__out_formats[] = {
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 int
 pcmk__out_prologue(pcmk__output_t **out, xmlNodePtr *xml) {
     if (*xml != NULL) {
         xmlFreeNode(*xml);
     }
 
     pcmk__register_formats(NULL, pcmk__out_formats);
     return pcmk__output_new(out, "xml", NULL, NULL);
 }
 
 void
 pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval) {
     if (retval == pcmk_rc_ok) {
         out->finish(out, 0, FALSE, (void **) xml);
     }
 
     pcmk__output_free(out);
 }
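 
 /* Typical call pattern (sketch; error handling abbreviated):
  *
  *     xmlNodePtr xml = NULL;
  *     pcmk__output_t *out = NULL;
  *     int rc = pcmk__out_prologue(&out, &xml);
  *
  *     if (rc == pcmk_rc_ok) {
  *         // ... emit messages via out->message() ...
  *         pcmk__out_epilogue(out, &xml, rc);
  *     }
  */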
 
 PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean")
 static int colocations_list(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gboolean dependents = va_arg(args, gboolean);
     gboolean recursive = va_arg(args, gboolean);
 
-    GListPtr lpc = NULL;
-    GListPtr list = rsc->rsc_cons;
+    GList *lpc = NULL;
+    GList *list = rsc->rsc_cons;
     bool printed_header = false;
 
     if (dependents) {
         list = rsc->rsc_cons_lhs;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         return pcmk_rc_no_output;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     for (lpc = list; lpc != NULL; lpc = lpc->next) {
         rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
 
         char *score = NULL;
         pe_resource_t *peer = cons->rsc_rh;
 
         if (dependents) {
             peer = cons->rsc_lh;
         }
 
         if (pcmk_is_set(peer->flags, pe_rsc_allocating)) {
             if (dependents == FALSE) {
                 if (!printed_header) {
                     out->begin_list(out, NULL, NULL, "Colocations");
                     printed_header = true;
                 }
 
                 out->list_item(out, NULL, "%s (id=%s - loop)", peer->id, cons->id);
             }
             continue;
         }
 
         if (dependents && recursive) {
             if (!printed_header) {
                 out->begin_list(out, NULL, NULL, "Colocations");
                 printed_header = true;
             }
 
             out->message(out, "colocations-list", rsc, dependents, recursive);
         }
 
         if (!printed_header) {
             out->begin_list(out, NULL, NULL, "Colocations");
             printed_header = true;
         }
 
         score = score2char(cons->score);
         if (cons->role_rh > RSC_ROLE_STARTED) {
             out->list_item(out, NULL, "%s (score=%s, %s role=%s, id=%s",
                            peer->id, score, dependents ? "needs" : "with",
                            role2text(cons->role_rh), cons->id);
         } else {
             out->list_item(out, NULL, "%s (score=%s, id=%s",
                            peer->id, score, cons->id);
         }
 
         free(score);
         out->message(out, "locations-list", peer);
 
         if (!dependents && recursive) {
             out->message(out, "colocations-list", rsc, dependents, recursive);
         }
     }
 
     if (printed_header) {
         out->end_list(out);
     }
 
     return pcmk_rc_no_output;
 }
 
 PCMK__OUTPUT_ARGS("colocations-list", "pe_resource_t *", "gboolean", "gboolean")
 static int colocations_list_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gboolean dependents = va_arg(args, gboolean);
     gboolean recursive = va_arg(args, gboolean);
 
-    GListPtr lpc = NULL;
-    GListPtr list = rsc->rsc_cons;
+    GList *lpc = NULL;
+    GList *list = rsc->rsc_cons;
     bool printed_header = false;
 
     if (dependents) {
         list = rsc->rsc_cons_lhs;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         return pcmk_rc_ok;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     for (lpc = list; lpc != NULL; lpc = lpc->next) {
         rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
         pe_resource_t *peer = cons->rsc_rh;
         char *score = NULL;
 
         if (dependents) {
             peer = cons->rsc_lh;
         }
 
         if (pcmk_is_set(peer->flags, pe_rsc_allocating)) {
             if (dependents == FALSE) {
                 if (!printed_header) {
                     pcmk__output_xml_create_parent(out, "colocations", NULL);
                     printed_header = true;
                 }
 
                 pcmk__output_create_xml_node(out, "colocation",
                                              "peer", peer->id,
                                              "id", cons->id,
                                              NULL);
             }
             continue;
         }
 
         if (dependents && recursive) {
             if (!printed_header) {
                 pcmk__output_xml_create_parent(out, "colocations", NULL);
                 printed_header = true;
             }
 
             out->message(out, "colocations-list", rsc, dependents, recursive);
         }
 
         if (!printed_header) {
             pcmk__output_xml_create_parent(out, "colocations", NULL);
             printed_header = true;
         }
 
         score = score2char(cons->score);
         if (cons->role_rh > RSC_ROLE_STARTED) {
             pcmk__output_create_xml_node(out, "colocation",
                                          "peer", peer->id,
                                          "id", cons->id,
                                          "score", score,
                                          "dependents", dependents ? "needs" : "with",
                                          "role", role2text(cons->role_rh),
                                          NULL);
         } else {
             pcmk__output_create_xml_node(out, "colocation",
                                          "peer", peer->id,
                                          "id", cons->id,
                                          "score", score,
                                          NULL);
         }
 
         free(score);
         out->message(out, "locations-list", peer);
 
         if (!dependents && recursive) {
             out->message(out, "colocations-list", rsc, dependents, recursive);
         }
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
 static int locations_list(pcmk__output_t *out, va_list args) {
-    pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *);
+    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
 
-    GListPtr lpc = NULL;
-    GListPtr list = rsc->rsc_location;
+    GList *lpc = NULL;
+    GList *list = rsc->rsc_location;
 
     out->begin_list(out, NULL, NULL, "Locations");
 
     for (lpc = list; lpc != NULL; lpc = lpc->next) {
         pe__location_t *cons = lpc->data;
 
-        GListPtr lpc2 = NULL;
+        GList *lpc2 = NULL;
 
         for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
             pe_node_t *node = (pe_node_t *) lpc2->data;
             char *score = score2char(node->weight);
 
             out->list_item(out, NULL, "Node %s (score=%s, id=%s)",
                            node->details->uname, score, cons->id);
             free(score);
         }
     }
 
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
 static int locations_list_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
 
-    GListPtr lpc = NULL;
-    GListPtr list = rsc->rsc_location;
+    GList *lpc = NULL;
+    GList *list = rsc->rsc_location;
 
     pcmk__output_xml_create_parent(out, "locations", NULL);
 
     for (lpc = list; lpc != NULL; lpc = lpc->next) {
         pe__location_t *cons = lpc->data;
 
-        GListPtr lpc2 = NULL;
+        GList *lpc2 = NULL;
 
         for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
             pe_node_t *node = (pe_node_t *) lpc2->data;
             char *score = score2char(node->weight);
 
             pcmk__output_create_xml_node(out, "location",
                                          "host", node->details->uname,
                                          "id", cons->id,
                                          "score", score,
                                          NULL);
             free(score);
         }
     }
 
     pcmk__output_xml_pop_parent(out);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean")
 static int
 stacks_and_constraints(pcmk__output_t *out, va_list args) {
-    pe_resource_t *rsc G_GNUC_UNUSED = va_arg(args, pe_resource_t *);
-    pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
-    gboolean recursive G_GNUC_UNUSED = va_arg(args, gboolean);
+    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+    pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+    gboolean recursive = va_arg(args, gboolean);
 
-    GListPtr lpc = NULL;
-    xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
-                                               data_set->input);
+    GList *lpc = NULL;
+    xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
+                                                 data_set->input);
 
     unpack_constraints(cib_constraints, data_set);
 
     // Constraints apply to group/clone, not member/instance
     rsc = uber_parent(rsc);
 
     for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *r = (pe_resource_t *) lpc->data;
 
         pe__clear_resource_flags(r, pe_rsc_allocating);
     }
 
     out->message(out, "colocations-list", rsc, TRUE, recursive);
 
     out->begin_list(out, NULL, NULL, "%s", rsc->id);
     out->message(out, "locations-list", rsc);
     out->end_list(out);
 
     for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *r = (pe_resource_t *) lpc->data;
 
         pe__clear_resource_flags(r, pe_rsc_allocating);
     }
 
     out->message(out, "colocations-list", rsc, FALSE, recursive);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean")
 static int
 stacks_and_constraints_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     gboolean recursive = va_arg(args, gboolean);
 
-    GListPtr lpc = NULL;
-    xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
-                                               data_set->input);
+    GList *lpc = NULL;
+    xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
+                                                 data_set->input);
 
     unpack_constraints(cib_constraints, data_set);
 
     // Constraints apply to group/clone, not member/instance
     rsc = uber_parent(rsc);
 
     for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *r = (pe_resource_t *) lpc->data;
 
         pe__clear_resource_flags(r, pe_rsc_allocating);
     }
 
     pcmk__output_xml_create_parent(out, "constraints", NULL);
 
     out->message(out, "colocations-list", rsc, TRUE, recursive);
 
     pcmk__output_xml_create_parent(out, "resource",
                                    "id", rsc->id,
                                    NULL);
     out->message(out, "locations-list", rsc);
     pcmk__output_xml_pop_parent(out);
 
     for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *r = (pe_resource_t *) lpc->data;
 
         pe__clear_resource_flags(r, pe_rsc_allocating);
     }
 
     out->message(out, "colocations-list", rsc, FALSE, recursive);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
 static int
 health_text(pcmk__output_t *out, va_list args)
 {
-    char *sys_from = va_arg(args, char *);
-    char *host_from = va_arg(args, char *);
-    char *fsa_state = va_arg(args, char *);
-    char *result = va_arg(args, char *);
+    const char *sys_from = va_arg(args, const char *);
+    const char *host_from = va_arg(args, const char *);
+    const char *fsa_state = va_arg(args, const char *);
+    const char *result = va_arg(args, const char *);
 
     if (!out->is_quiet(out)) {
         out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from),
                        crm_str(host_from), crm_str(fsa_state), crm_str(result));
     } else if (fsa_state != NULL) {
         out->info(out, "%s", fsa_state);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("health", "char *", "char *", "char *", "char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
 static int
 health_xml(pcmk__output_t *out, va_list args)
 {
-    char *sys_from = va_arg(args, char *);
-    char *host_from = va_arg(args, char *);
-    char *fsa_state = va_arg(args, char *);
-    char *result = va_arg(args, char *);
+    const char *sys_from = va_arg(args, const char *);
+    const char *host_from = va_arg(args, const char *);
+    const char *fsa_state = va_arg(args, const char *);
+    const char *result = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, crm_str(sys_from),
                                  "node_name", crm_str(host_from),
                                  "state", crm_str(fsa_state),
                                  "result", crm_str(result),
                                  NULL);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *")
+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *")
 static int
 pacemakerd_health_text(pcmk__output_t *out, va_list args)
 {
-    char *sys_from = va_arg(args, char *);
-    char *state = va_arg(args, char *);
-    char *last_updated = va_arg(args, char *);
+    const char *sys_from = va_arg(args, const char *);
+    const char *state = va_arg(args, const char *);
+    const char *last_updated = va_arg(args, const char *);
 
     if (!out->is_quiet(out)) {
         out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from),
                   crm_str(state), (!pcmk__str_empty(last_updated))?
                   "last updated":"", crm_str(last_updated));
     } else {
         out->info(out, "%s", crm_str(state));
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("pacemakerd-health", "char *", "char *", "char *")
+PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *")
 static int
 pacemakerd_health_xml(pcmk__output_t *out, va_list args)
 {
-    char *sys_from = va_arg(args, char *);
-    char *state = va_arg(args, char *);
-    char *last_updated = va_arg(args, char *);
+    const char *sys_from = va_arg(args, const char *);
+    const char *state = va_arg(args, const char *);
+    const char *last_updated = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, crm_str(sys_from),
                                  "state", crm_str(state),
                                  "last_updated", crm_str(last_updated),
                                  NULL);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("dc", "char *")
+PCMK__OUTPUT_ARGS("dc", "const char *")
 static int
 dc_text(pcmk__output_t *out, va_list args)
 {
-    char *dc = va_arg(args, char *);
+    const char *dc = va_arg(args, const char *);
 
     if (!out->is_quiet(out)) {
         out->info(out, "Designated Controller is: %s", crm_str(dc));
     } else if (dc != NULL) {
         out->info(out, "%s", dc);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("dc", "char *")
+PCMK__OUTPUT_ARGS("dc", "const char *")
 static int
 dc_xml(pcmk__output_t *out, va_list args)
 {
-    char *dc = va_arg(args, char *);
+    const char *dc = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, "dc",
                                  "node_name", crm_str(dc),
                                  NULL);
     return pcmk_rc_ok;
 }
 
 
-PCMK__OUTPUT_ARGS("crmadmin-node-list", "pcmk__output_t *", "xmlNode *")
+PCMK__OUTPUT_ARGS("crmadmin-node-list", "xmlNodePtr", "gboolean")
 static int
 crmadmin_node_list(pcmk__output_t *out, va_list args)
 {
-    xmlNode *xml_node = va_arg(args, xmlNode *);
+    xmlNodePtr xml_node = va_arg(args, xmlNodePtr);
+    gboolean BASH_EXPORT = va_arg(args, gboolean);
+
     int found = 0;
     xmlNode *node = NULL;
     xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node);
-    gboolean BASH_EXPORT = va_arg(args, gboolean);
 
     out->begin_list(out, NULL, NULL, "nodes");
 
     for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL;
          node = crm_next_same_xml(node)) {
         const char *node_type = BASH_EXPORT ? NULL :
                      crm_element_value(node, XML_ATTR_TYPE);
         out->message(out, "crmadmin-node", node_type,
                      crm_str(crm_element_value(node, XML_ATTR_UNAME)),
                      crm_str(crm_element_value(node, XML_ATTR_ID)),
                      BASH_EXPORT);
 
         found++;
     }
     // @TODO List Pacemaker Remote nodes that don't have a <node> entry
 
     out->end_list(out);
 
     if (found == 0) {
         out->info(out, "No nodes configured");
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean")
 static int
 crmadmin_node_text(pcmk__output_t *out, va_list args)
 {
-    char *type = va_arg(args, char *);
-    char *name = va_arg(args, char *);
-    char *id = va_arg(args, char *);
+    const char *type = va_arg(args, const char *);
+    const char *name = va_arg(args, const char *);
+    const char *id = va_arg(args, const char *);
     gboolean BASH_EXPORT = va_arg(args, gboolean);
 
     if (BASH_EXPORT) {
         out->info(out, "export %s=%s", crm_str(name), crm_str(id));
     } else {
         out->info(out, "%s node: %s (%s)", type ? type : "member",
                   crm_str(name), crm_str(id));
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("crmadmin-node", "char *", "char *", "char *", "gboolean")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean")
 static int
 crmadmin_node_xml(pcmk__output_t *out, va_list args)
 {
-    char *type = va_arg(args, char *);
-    char *name = va_arg(args, char *);
-    char *id = va_arg(args, char *);
+    const char *type = va_arg(args, const char *);
+    const char *name = va_arg(args, const char *);
+    const char *id = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, "node",
                                  "type", type ? type : "member",
                                  "name", crm_str(name),
                                  "id", crm_str(id),
                                  NULL);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "colocations-list", "default", colocations_list },
     { "colocations-list", "xml", colocations_list_xml },
     { "locations-list", "default", locations_list },
     { "locations-list", "xml", locations_list_xml },
     { "stacks-constraints", "default", stacks_and_constraints },
     { "stacks-constraints", "xml", stacks_and_constraints_xml },
     { "health", "default", health_text },
     { "health", "xml", health_xml },
     { "pacemakerd-health", "default", pacemakerd_health_text },
     { "pacemakerd-health", "xml", pacemakerd_health_xml },
     { "dc", "default", dc_text },
     { "dc", "xml", dc_xml },
     { "crmadmin-node-list", "default", crmadmin_node_list },
     { "crmadmin-node", "default", crmadmin_node_text },
     { "crmadmin-node", "xml", crmadmin_node_xml },
 
     { NULL, NULL, NULL }
 };
 
 void
 pcmk__register_lib_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
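 
 /* Usage sketch (hypothetical node name): after registration, callers such as
  * crmadmin emit these messages through the output object:
  *
  *     pcmk__register_lib_messages(out);
  *     out->message(out, "dc", "cluster02");
  */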
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 543f5dc831..7a175a5766 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,2104 +1,2104 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include "./variant.h"
 
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
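 
 /* For example: next_ip("10.0.0.5") yields "10.0.0.6", while
  * next_ip("10.0.0.254") rolls over to "10.0.1.1"; NULL is returned once the
  * third octet has passed 253 (or for anything other than a dotted quad).
  */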
 
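 /* Assign the next address in the bundle's IP range to a replica and write the
  * matching container host-mapping option into buffer; returns the number of
  * characters written (0 when no IP range is configured).
  */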
 static int
 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
             char *buffer, int max)
 {
     if(data->ip_range_start == NULL) {
         return 0;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 return snprintf(buffer, max, " --add-host=%s-%d:%s",
                                 data->prefix, replica->offset,
                                 replica->ipaddr);
             }
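             /* else: no --add-host support, so fall through to the
              * hosts-entry form shared with rkt */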
         case PE__CONTAINER_AGENT_RKT:
             return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                             replica->ipaddr, data->prefix, replica->offset);
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return 0;
 }
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
         }
         return TRUE;
     }
     return FALSE;
 }
 
 static bool
 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                    pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
             return FALSE;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return TRUE;
 }
 
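 /*!
  * \internal
  * \brief Create the implicit container resource for a replica (docker)
  *
  * Assemble the docker run options (restart policy, hostname, remote port,
  * storage mounts, port mappings, and any user-supplied options) into an
  * ocf:heartbeat:docker primitive, and unpack it as a child of the bundle.
  *
  * \return TRUE on success, FALSE otherwise
  */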
 static bool
 create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
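 
         /* Note: These offsets accumulate snprintf() return values without
          * checking for truncation, so the buffer sizes assume the assembled
          * run options stay well under 4KB (and the mount list under 1KB).
          */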
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_DOCKER_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this keeps applications happy that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(!pcmk__str_eq(data->container_network, "host", pcmk__str_casei)) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably it would be better to control this API access with ACLs,
          * as for "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
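 /*!
  * \internal
  * \brief Create the implicit container resource for a replica (podman)
  *
  * Same as create_docker_resource(), except where podman's command-line
  * syntax differs from docker's (for example, podman has no restart
  * policies).
  *
  * \return TRUE on success, FALSE otherwise
  */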
 static bool
 create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_PODMAN_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         // FIXME: (bandini 2018-08) podman has no restart policies
         //offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this keeps applications happy that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             // podman has no support for --link-local-ip
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    replica->ipaddr, port->source,
                                    port->target);
             } else if(!pcmk__str_eq(data->container_network, "host", pcmk__str_casei)) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably it would be better to control this API access with ACLs,
          * as for "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         if (!common_unpack(xml_container, &replica->container, parent,
                            data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
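 /*!
  * \internal
  * \brief Create the implicit container resource for a replica (rkt)
  *
  * Same as create_docker_resource(), but using rkt's option syntax
  * (--environment, numbered --volume/--mount pairs, and --port).
  *
  * \return TRUE on success, FALSE otherwise
  */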
 static bool
 create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                     pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_container = NULL;
         xmlNode *xml_obj = NULL;
 
         int volid = 0;
 
         id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
         crm_xml_sanitize_id(id);
         xml_container = create_resource(id, "heartbeat",
                                         PE__CONTAINER_AGENT_RKT_S);
         free(id);
 
         xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this keeps applications happy that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                                data->prefix, replica->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
 
         if (data->container_network) {
 #if 0
             offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
                                replica->ipaddr);
 #endif
             offset += snprintf(buffer+offset, max-offset, " --net=%s",
                                data->container_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             pe__bundle_mount_t *mount = pIter->data;
 
             if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, replica->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
             }
             volid++;
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             pe__bundle_port_t *port = pIter->data;
 
             if (replica->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset,
                                    " --port=%s:%s:%s", port->target,
                                    replica->ipaddr, port->source);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
             }
         }
 
         if (data->launcher_options) {
             offset += snprintf(buffer+offset, max-offset, " %s",
                                data->launcher_options);
         }
 
         if (data->container_host_options) {
             offset += snprintf(buffer + offset, max - offset, " %s",
                                data->container_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if (replica->child) {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably it would be better to control this API access with ACLs,
          * as for "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if (data->container_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->container_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
         xml_obj = create_xml_node(xml_container, "operations");
         crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, replica->container);
         return TRUE;
 }
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pe_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         GListPtr child;
 
         for (child = rsc->children; child != NULL; child = child->next) {
             disallow_node((pe_resource_t *) (child->data), uname);
         }
     }
 }
 
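 /*!
  * \internal
  * \brief Create the implicit remote connection resource for a replica
  *
  * If the replica has a child resource and the bundle's network configuration
  * is usable, create the pacemaker_remote connection resource and the guest
  * node it implies, and ban every other resource from that node.
  *
  * \return TRUE on success (including when nothing needs to be created),
  *         FALSE otherwise
  */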
 static bool
 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica,
                        pe_working_set_t *data_set)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         GListPtr rsc_iter = NULL;
         pe_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(data_set->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             //@TODO return false instead of asserting?
             CRM_ASSERT(pe_find_resource(data_set->resources, id) == NULL);
         }
 
         /* REMOTE_CONTAINER_HACK: When the connection does not have its own
          * IP, use the magic string "#uname" as the server name; this
          * supports nested remotes (i.e. a bundle running on a remote node).
          */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = crm_itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
          * knows that the bundle node is fenced by recovering the container, and
          * that remote should be ordered relative to the container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(data_set->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   data_set);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when common_unpack() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
             disallow_node((pe_resource_t *) (rsc_iter->data), uname);
         }
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
                                                               g_str_equal,
                                                               NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pe_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
             return FALSE;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return TRUE;
 }
 
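 /*!
  * \internal
  * \brief Create all implicit resources for one bundle replica
  *
  * Create the replica's container resource for the configured agent type,
  * then its IP address and remote connection resources where applicable.
  *
  * \return TRUE on success, FALSE otherwise
  */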
 static bool
 create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
 {
 
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
             if (!create_docker_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_PODMAN:
             if (!create_podman_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             if (!create_rkt_resource(parent, data, replica, data_set)) {
                 return FALSE;
             }
             break;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             return FALSE;
     }
 
     if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
         return FALSE;
     }
     if (replica->child && replica->ipaddr) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
     }
 
     return TRUE;
 }
 
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     mount->source = strdup(source);
     mount->target = strdup(target);
     if (options) {
         mount->options = strdup(options);
     }
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
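 /*!
  * \internal
  * \brief Find the bundle replica using a given remote connection resource
  *
  * \param[in] remote  Remote connection resource to search for
  *
  * \return Bundle replica whose connection is \p remote, or NULL if none
  */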
 static pe__bundle_replica_t *
 replica_for_remote(pe_resource_t *remote)
 {
     pe_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
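 /*!
  * \internal
  * \brief Check whether a resource is a bundle connection needing its address
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if \p rsc is a bundle's remote connection resource whose
  *         address is the "#uname" placeholder, false otherwise
  */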
 bool
 pe__bundle_needs_remote_name(pe_resource_t *rsc)
 {
     const char *value;
 
     if (rsc == NULL) {
         return false;
     }
 
     value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     return pcmk__str_eq(value, "#uname", pcmk__str_casei)
            && xml_contains_remote_node(rsc->xml);
 }
 
 const char *
 pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
 {
     // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
 
     pe_node_t *node = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pe__bundle_needs_remote_name(rsc)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, node->details->uname);
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
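 /*!
  * \internal
  * \brief Set flags (with a trace log) for a bundle mount
  *
  * \param[in]     mount_xml     XML of the bundle's storage mount
  * \param[in,out] flags         Flag group to modify
  * \param[in]     flags_to_set  Which flags to set
  */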
 #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do {     \
         flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,           \
                                    "Bundle mount", ID(mount_xml), flags,    \
                                    (flags_to_set), #flags_to_set);          \
     } while (0)
 
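 /*!
  * \internal
  * \brief Unpack a bundle resource's configuration into variant data
  *
  * \param[in,out] rsc       Bundle resource to unpack
  * \param[in,out] data_set  Cluster working set
  *
  * \return TRUE on success, FALSE if the configuration is invalid
  */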
 gboolean
 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
 
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     bundle_data->promoted_max = crm_parse_int(value, "0");
     if (bundle_data->promoted_max < 0) {
         pe_err("%s for %s must be nonnegative integer, using 0",
                XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
         bundle_data->promoted_max = 0;
     }
 
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && bundle_data->promoted_max) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         bundle_data->nreplicas = crm_parse_int(value, "1");
     }
     if (bundle_data->nreplicas < 1) {
         pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
         bundle_data->nreplicas = 1;
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if the Docker daemon is run with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
     if (bundle_data->nreplicas_per_host < 1) {
         pe_err("'replicas-per-host' for %s must be positive integer, using 1",
                rsc->id);
         bundle_data->nreplicas_per_host = 1;
     }
     if (bundle_data->nreplicas_per_host == 1) {
         pe__clear_resource_flags(rsc, pe_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
         for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
              xml_child = pcmk__xe_next(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
          xml_child = pcmk__xe_next(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             pe__set_bundle_mount_flags(xml_child, flags,
                                        pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = crm_itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = crm_itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                               pcmk__btoa(bundle_data->nreplicas_per_host > 1));
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = crm_itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GListPtr childIter = NULL;
         pe_resource_t *new_rsc = NULL;
         pe__bundle_port_t *port = NULL;
 
         int offset = 0, max = 1024;
         char *buffer = NULL;
 
         if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", ID(rsc->xml));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
             return FALSE;
         }
 
         bundle_data->child = new_rsc;
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection will do extra checking (which we can't do
          *   here on the DC) to make sure the key file exists and is readable;
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = crm_itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = calloc(1, max+1);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
                 pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
             }
 
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
             bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                 XML_RSC_ATTR_TARGET);
         }
         bundle_data->container_host_options = buffer;
         if (bundle_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
                                  strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         int offset = 0, max = 1024;
         char *buffer = calloc(1, max+1);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->offset = lpc;
             offset += allocate_ip(bundle_data, replica, buffer+offset,
                                   max-offset);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = buffer;
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (!create_container(rsc, bundle_data, replica, data_set)) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
 
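 /*!
  * \internal
  * \brief Check whether one replica resource settles a bundle's active status
  *
  * \param[in] rsc  Replica resource to check (may be NULL)
  * \param[in] all  Whether all resources must be active for the bundle to be
  *
  * \return TRUE if \p rsc is active and \p all is FALSE, FALSE if \p rsc is
  *         inactive and \p all is TRUE, or -1 if \p rsc does not decide the
  *         answer (including when \p rsc is NULL)
  */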
 static int
 replica_resource_active(pe_resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 pe__bundle_active(pe_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GListPtr iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Bundle replica if found, NULL otherwise
  */
 pe_resource_t *
 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 static void
 print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
 static void
 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica id=\"%d\">\n", pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         char *id = crm_itoa(replica->offset);
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6,
                      "id", rsc->id,
                      "type", container_agent_str(bundle_data->agent_type),
                      "image", bundle_data->image,
                      "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                      "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                      "failed", pe__rsc_bool_str(rsc, pe_rsc_failed));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (print_ip) {
             out->message(out, crm_map_element_name(replica->ip->xml), options,
                          replica->ip, only_node, only_rsc);
         }
 
         if (print_child) {
             out->message(out, crm_map_element_name(replica->child->xml), options,
                          replica->child, only_node, only_rsc);
         }
 
         if (print_ctnr) {
             out->message(out, crm_map_element_name(replica->container->xml), options,
                          replica->container, only_node, only_rsc);
         }
 
         if (print_remote) {
             out->message(out, crm_map_element_name(replica->remote->xml), options,
                          replica->remote, only_node, only_rsc);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, long options)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, options);
 }
 
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     char buffer[LINE_MAX];
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(options, pe_print_implicit) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The output messages used below require pe_print_implicit to
              * be set to do anything.
              */
             unsigned int new_options = options;
 
             if (!pcmk_is_set(options, pe_print_implicit)) {
                 new_options |= pe_print_implicit;
             }
 
             if (rc == pcmk_rc_no_output) {
                 pcmk__output_create_xml_node(out, "br", NULL);
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pcmk__output_xml_create_parent(out, "li", NULL);
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
                 xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
             }
             pcmk__output_create_xml_node(out, "br", NULL);
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_options, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_options, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_options, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_options, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
                                            options);
         }
 
         pcmk__output_xml_pop_parent(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
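 /*!
  * \internal
  * \brief Output the text representation of a single bundle replica
  *
  * The replica is labeled by its remote connection's printable ID if it has
  * one (otherwise its container's), plus its IP address if any.
  *
  * \param[in] out      Output object
  * \param[in] replica  Bundle replica to output
  * \param[in] node     Node the replica's container is running on, if any
  * \param[in] options  Bitmask of pe_print_options
  */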
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, long options)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, options);
 }
 
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc->id);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(options, pe_print_implicit) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The text output messages used below require pe_print_implicit to
              * be set to do anything.
              */
             unsigned int new_options = options;
 
             if (!pcmk_is_set(options, pe_print_implicit)) {
                 new_options |= pe_print_implicit;
             }
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_options, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_options, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_options, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_options, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
             pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
                                            options);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
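 /*!
  * \internal
  * \brief Print a single bundle replica using status_print()
  *
  * \param[in] replica     Bundle replica to print
  * \param[in] pre_text    Prefix for each line of output
  * \param[in] options     Bitmask of pe_print_options
  * \param[in] print_data  Output destination for status_print()
  */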
 static void
 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 void
 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (pcmk_is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
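 /*!
  * \internal
  * \brief Free the memory used by a single bundle replica
  *
  * This frees the replica's node, IP, container, and remote resources
  * (including their XML) and its IP address string. The child resource is
  * not freed here; it is freed along with bundle_data->child in
  * pe__free_bundle().
  *
  * \param[in] replica  Bundle replica to free (may be NULL)
  */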
 static void
 free_bundle_replica(pe__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if (bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
 
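 /*!
  * \internal
  * \brief Get a bundle's role
  *
  * Bundles do not aggregate a role from their replicas, so this always
  * returns RSC_ROLE_UNKNOWN.
  */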
 enum rsc_role_e
 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pe_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
 
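 /*!
  * \internal
  * \brief Update resource counts to include a bundle's resources
  *
  * Calls the count() method of each replica's IP, child, container, and
  * remote resources, where present.
  *
  * \param[in] rsc  Bundle resource to count
  */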
 void
 pe__count_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pe__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
 
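 /*!
  * \internal
  * \brief Check whether a bundle should be filtered out of output
  *
  * \param[in] rsc           Bundle resource to check
  * \param[in] only_rsc      List of resource IDs to match against
  * \param[in] check_parent  Unused
  *
  * \return FALSE if the bundle or any of its replicas' resources matches
  *         \p only_rsc, or TRUE otherwise
  */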
 gboolean
 pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
         passes = TRUE;
     } else {
         get_bundle_variant_data(bundle_data, rsc);
 
         for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 0708fdc9ea..9e9817873b 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1209 +1,1209 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 
 #define VARIANT_CLONE 1
 #include "./variant.h"
 
 void
 pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
                pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
 
         pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
                 "such as %s can be used only as anonymous clones",
                 rsc->id, standard, rid);
 
         clone_data->clone_node_max = 1;
         clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                        g_list_length(data_set->nodes));
     }
 }
 
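 /*!
  * \internal
  * \brief Find an existing instance of a clone by instance number
  *
  * \param[in] rsc       Clone resource to search
  * \param[in] sub_id    Instance number as a string (for example, "0")
  * \param[in] data_set  Cluster working set (unused)
  *
  * \return Matching child resource if found, otherwise NULL
  */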
 pe_resource_t *
 find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
 {
     char *child_id = NULL;
     pe_resource_t *child = NULL;
     const char *child_base = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     child_base = ID(clone_data->xml_obj_child);
     child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
     child = pe_find_resource(rsc->children, child_id);
 
     free(child_id);
     return child;
 }
 
 pe_resource_t *
 pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     gboolean as_orphan = FALSE;
     char *inc_num = NULL;
     char *inc_max = NULL;
     pe_resource_t *child_rsc = NULL;
     xmlNode *child_copy = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);
 
     if (clone_data->total_clones >= clone_data->clone_max) {
         // If we've already used all available instances, this is an orphan
         as_orphan = TRUE;
     }
 
     // Allocate instance numbers in numerical order (starting at 0)
     inc_num = crm_itoa(clone_data->total_clones);
     inc_max = crm_itoa(clone_data->clone_max);
 
     child_copy = copy_xml(clone_data->xml_obj_child);
 
     crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
 
     if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
         pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
         child_rsc = NULL;
         goto bail;
     }
 /*  child_rsc->globally_unique = rsc->globally_unique; */
 
     CRM_ASSERT(child_rsc);
     clone_data->total_clones += 1;
     pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
     rsc->children = g_list_append(rsc->children, child_rsc);
     if (as_orphan) {
         pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
     }
 
     add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
     pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
 
   bail:
     free(inc_num);
     free(inc_max);
 
     return child_rsc;
 }
 
 gboolean
 clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     int lpc = 0;
     xmlNode *a_child = NULL;
     xmlNode *xml_obj = rsc->xml;
     clone_variant_data_t *clone_data = NULL;
 
     const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
     const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     clone_data = calloc(1, sizeof(clone_variant_data_t));
     rsc->variant_opaque = clone_data;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         const char *promoted_max = NULL;
         const char *promoted_node_max = NULL;
 
         promoted_max = g_hash_table_lookup(rsc->meta,
                                            XML_RSC_ATTR_PROMOTED_MAX);
         if (promoted_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_max = g_hash_table_lookup(rsc->meta,
                                                XML_RSC_ATTR_MASTER_MAX);
         }
 
         promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                 XML_RSC_ATTR_PROMOTED_NODEMAX);
         if (promoted_node_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                     XML_RSC_ATTR_MASTER_NODEMAX);
         }
 
         clone_data->promoted_max = crm_parse_int(promoted_max, "1");
         clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
     }
 
     // Implied by calloc()
     /* clone_data->xml_obj_child = NULL; */
 
     clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
 
     if (max_clones) {
         clone_data->clone_max = crm_parse_int(max_clones, "1");
 
     } else if (pcmk__list_of_multiple(data_set->nodes)) {
         clone_data->clone_max = g_list_length(data_set->nodes);
 
     } else {
         clone_data->clone_max = 1;      /* Handy during crm_verify */
     }
 
     clone_data->ordered = crm_is_true(ordered);
 
     if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
         pcmk__config_err("Ignoring " XML_RSC_ATTR_INCARNATION_NODEMAX " for %s "
                          "because anonymous clones support only one instance "
                          "per node", rsc->id);
         clone_data->clone_node_max = 1;
     }
 
     pe_rsc_trace(rsc, "Options for %s", rsc->id);
     pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
     pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
     pe_rsc_trace(rsc, "\tClone is unique: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_unique));
     pe_rsc_trace(rsc, "\tClone is promotable: %s",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
 
     // Clones may contain a single group or primitive
     for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
          a_child = pcmk__xe_next(a_child)) {
 
         if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
             clone_data->xml_obj_child = a_child;
             break;
         }
     }
 
     if (clone_data->xml_obj_child == NULL) {
         pcmk__config_err("%s has nothing to clone", rsc->id);
         return FALSE;
     }
 
     /*
      * Make clones ever so slightly sticky by default
      *
      * This helps ensure clone instances are not shuffled around the cluster
      * for no benefit in situations when pre-allocation is not appropriate
      */
     if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
         add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
     }
 
     /* This ensures that the globally-unique value always exists for children to
      * inherit when being unpacked, as well as in resource agents' environment.
      */
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                    pe__rsc_bool_str(rsc, pe_rsc_unique));
 
     if (clone_data->clone_max <= 0) {
         /* Create one child instance so that unpack_find_resource() will hook
          * any orphans up to the parent correctly.
          */
         if (pe__create_clone_child(rsc, data_set) == NULL) {
             return FALSE;
         }
 
     } else {
         // Create a child instance for each available instance number
         for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
             if (pe__create_clone_child(rsc, data_set) == NULL) {
                 return FALSE;
             }
         }
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
     return TRUE;
 }
 
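 /*!
  * \internal
  * \brief Check whether a clone is active
  *
  * \param[in] rsc  Clone resource to check
  * \param[in] all  If TRUE, check whether all instances are active;
  *                 otherwise, check whether any instance is active
  *
  * \return TRUE if the check passes, otherwise FALSE
  */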
 gboolean
 clone_active(pe_resource_t * rsc, gboolean all)
 {
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean child_active = child_rsc->fns->active(child_rsc, all);
 
         if (all == FALSE && child_active) {
             return TRUE;
         } else if (all && child_active == FALSE) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     } else {
         return FALSE;
     }
 }
 
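 /*!
  * \internal
  * \brief Print a bracketed node list with a type label
  *
  * Prints nothing if \p list is NULL.
  *
  * \param[in] list        Space-separated list of node names
  * \param[in] prefix      String to print before the label
  * \param[in] type        Label for the list (for example, "Started")
  * \param[in] suffix      String to print after the list (may be NULL)
  * \param[in] options     Bitmask of pe_print_options
  * \param[in] print_data  Output destination for status_print()
  */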
 static void
 short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
 {
     if (suffix == NULL) {
         suffix = "";
     }
 
     if (list) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         status_print("%s%s: [ %s ]%s", prefix, type, list, suffix);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
 
         } else if (options & pe_print_suppres_nl) {
             /* nothing */
         } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
             status_print("\n");
         }
 
     }
 }
 
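 /*!
  * \internal
  * \brief Get a resource's configured target role as a string
  *
  * Checks the resource's own meta-attributes first, then falls back to its
  * first child's (relevant for clones, whose children may carry the setting).
  *
  * \param[in] rsc  Resource to check
  *
  * \return Value of the target-role meta-attribute, or NULL if unset
  */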
 static const char *
 configured_role_str(pe_resource_t * rsc)
 {
     const char *target_role = g_hash_table_lookup(rsc->meta,
                                                   XML_RSC_ATTR_TARGET_ROLE);
 
     if ((target_role == NULL) && rsc->children && rsc->children->data) {
         target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
                                           XML_RSC_ATTR_TARGET_ROLE);
     }
     return target_role;
 }
 
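 /*!
  * \internal
  * \brief Get a resource's configured target role as an enum value
  *
  * \param[in] rsc  Resource to check
  *
  * \return Configured target role, or RSC_ROLE_UNKNOWN if unset
  */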
 static enum rsc_role_e
 configured_role(pe_resource_t * rsc)
 {
     const char *target_role = configured_role_str(rsc);
 
     if (target_role) {
         return text2role(target_role);
     }
     return RSC_ROLE_UNKNOWN;
 }
 
 static void
 clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = crm_strdup_printf("%s    ", pre_text);
     const char *target_role = configured_role_str(rsc);
     GListPtr gIter = rsc->children;
 
     status_print("%s<clone ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
     status_print("multi_state=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_promotable));
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</clone>\n", pre_text);
     free(child_text);
 }
 
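 /*!
  * \internal
  * \brief Check whether a flag is set for a resource and/or its descendants
  *
  * \param[in] rsc   Resource to check
  * \param[in] flag  Flag to check for
  * \param[in] any   If TRUE, check whether any of \p rsc and its descendants
  *                  has the flag set; otherwise, check whether all do
  *
  * \return TRUE if the check passes, otherwise FALSE
  */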
 bool
 is_set_recursive(pe_resource_t *rsc, long long flag, bool any)
 {
     GListPtr gIter;
     bool all = !any;
 
     if (pcmk_is_set(rsc->flags, flag)) {
         if (any) {
             return TRUE;
         }
     } else if (all) {
         return FALSE;
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         if (is_set_recursive(gIter->data, flag, any)) {
             if (any) {
                 return TRUE;
             }
 
         } else if (all) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     }
     return FALSE;
 }
 
 void
 clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *list_text = NULL;
     char *child_text = NULL;
     char *stopped_list = NULL;
     size_t list_text_len = 0;
     size_t stopped_list_len = 0;
 
     GListPtr master_list = NULL;
     GListPtr started_list = NULL;
     GListPtr gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         clone_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_clone_variant_data(clone_data, rsc);
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sClone Set: %s [%s]%s%s%s",
                  pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
                  pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(options, pe_print_clone_active)) {
                 pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     short_print(list_text, child_text, "Masters", NULL, options, print_data);
     g_list_free(master_list);
     free(list_text);
     list_text = NULL;
     list_text_len = 0;
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_SLAVE) {
             short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
         } else {
             short_print(list_text, child_text, "Slaves", NULL, options, print_data);
         }
 
     } else {
         short_print(list_text, child_text, "Started", NULL, options, print_data);
     }
 
     g_list_free(started_list);
     free(list_text);
     list_text = NULL;
     list_text_len = 0;
 
     if (!pcmk_is_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GListPtr nIter;
             GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
             stopped_list_len = 0;
 
             if (list == NULL) {
                 /* Clusters with symmetrical=false haven't calculated
                  * allowed_nodes yet. If we haven't probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                     pcmk__add_word(&stopped_list, &stopped_list_len,
                                    node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         short_print(stopped_list, child_text, state, NULL, options, print_data);
         free(stopped_list);
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 
     free(child_text);
 }
 
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
-    GListPtr gIter = rsc->children;
+    GList *gIter = rsc->children;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             rc = pe__name_and_nvpairs_xml(out, true, "clone", 8,
                     "id", rsc->id,
                     "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
                     "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                     "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                     "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
                     "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
                     "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
                     "target_role", configured_role_str(rsc));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), options,
                      child_rsc, only_node, only_rsc);
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     char *list_text = NULL;
     char *stopped_list = NULL;
     size_t list_text_len = 0;
     size_t stopped_list_len = 0;
 
-    GListPtr master_list = NULL;
-    GListPtr started_list = NULL;
-    GListPtr gIter = rsc->children;
+    GList *master_list = NULL;
+    GList *started_list = NULL;
+    GList *gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s",
                     rsc->id, ID(clone_data->xml_obj_child),
                     pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                     pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                     pe__resource_is_disabled(rsc) ? " (disabled)" : "");
     rc = pcmk_rc_ok;
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(options, pe_print_clone_active)) {
                 pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
-            GListPtr all = NULL;
+            GList *all = NULL;
 
             /* Print every resource that's a child of this clone. */
             all = g_list_prepend(all, strdup("*"));
             out->message(out, crm_map_element_name(child_rsc->xml), options,
                          child_rsc, only_node, all);
             g_list_free_full(all, free);
         }
     }
 
     if (pcmk_is_set(options, pe_print_clone_details)) {
         free(stopped_list);
         out->end_list(out);
         return pcmk_rc_ok;
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
-        out->list_item(out, NULL, " Masters: [ %s ]", list_text);
+        out->list_item(out, NULL, "Masters: [ %s ]", list_text);
         g_list_free(master_list);
         free(list_text);
         list_text = NULL;
         list_text_len = 0;
     }
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if (role == RSC_ROLE_SLAVE) {
-                out->list_item(out, NULL, " Slaves (target-role): [ %s ]", list_text);
+                out->list_item(out, NULL, "Slaves (target-role): [ %s ]", list_text);
             } else {
-                out->list_item(out, NULL, " Slaves: [ %s ]", list_text);
+                out->list_item(out, NULL, "Slaves: [ %s ]", list_text);
             }
 
         } else {
-            out->list_item(out, NULL, " Started: [ %s ]", list_text);
+            out->list_item(out, NULL, "Started: [ %s ]", list_text);
         }
 
         g_list_free(started_list);
         free(list_text);
         list_text = NULL;
         list_text_len = 0;
     }
 
     if (!pcmk_is_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
-            GListPtr nIter;
-            GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
+            GList *nIter;
+            GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
             stopped_list_len = 0;
 
             if (list == NULL) {
                 /* Clusters with symmetrical=false haven't calculated
                  * allowed_nodes yet. If we haven't probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(only_node, node->details->uname)) {
                     pcmk__add_word(&stopped_list, &stopped_list_len,
                                    node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
-            out->list_item(out, NULL, " %s: [ %s ]", state, stopped_list);
+            out->list_item(out, NULL, "%s: [ %s ]", state, stopped_list);
             free(stopped_list);
             stopped_list_len = 0;
         }
     }
 
     out->end_list(out);
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__clone_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     char *list_text = NULL;
     char *stopped_list = NULL;
     size_t list_text_len = 0;
     size_t stopped_list_len = 0;
 
-    GListPtr master_list = NULL;
-    GListPtr started_list = NULL;
-    GListPtr gIter = rsc->children;
+    GList *master_list = NULL;
+    GList *started_list = NULL;
+    GList *gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s",
                     rsc->id, ID(clone_data->xml_obj_child),
                     pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                     pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                     pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                     pe__resource_is_disabled(rsc) ? " (disabled)" : "");
     rc = pcmk_rc_ok;
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
             continue;
         }
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (pcmk_is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
                 && !pcmk_is_set(options, pe_print_clone_active)) {
                 pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
-            GListPtr all = NULL;
+            GList *all = NULL;
 
             /* Print every resource that's a child of this clone. */
             all = g_list_prepend(all, strdup("*"));
             out->message(out, crm_map_element_name(child_rsc->xml), options,
                          child_rsc, only_node, all);
             g_list_free_full(all, free);
         }
     }
 
     if (pcmk_is_set(options, pe_print_clone_details)) {
         free(stopped_list);
         out->end_list(out);
         return pcmk_rc_ok;
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         out->list_item(out, "Masters", "[ %s ]", list_text);
         g_list_free(master_list);
         free(list_text);
         list_text = NULL;
         list_text_len = 0;
     }
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         pe_node_t *host = gIter->data;
 
         if (!pcmk__str_in_list(only_node, host->details->uname)) {
             continue;
         }
 
         pcmk__add_word(&list_text, &list_text_len, host->details->uname);
         active_instances++;
     }
 
     if (list_text != NULL) {
         if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
             enum rsc_role_e role = configured_role(rsc);
 
             if (role == RSC_ROLE_SLAVE) {
                 out->list_item(out, "Slaves (target-role)", "[ %s ]", list_text);
             } else {
                 out->list_item(out, "Slaves", "[ %s ]", list_text);
             }
         } else {
             out->list_item(out, "Started", "[ %s ]", list_text);
         }
 
         g_list_free(started_list);
         free(list_text);
         list_text = NULL;
     }
 
     if (!pcmk_is_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
-            GListPtr nIter;
-            GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
+            GList *nIter;
+            GList *list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list);
             stopped_list = NULL;
             stopped_list_len = 0;
 
             if (list == NULL) {
                 /* Clusters with symmetrical=false haven't calculated
                  * allowed_nodes yet. If we haven't probed for them yet, the
                  * Stopped list will be empty.
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 pe_node_t *node = (pe_node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
                     pcmk__str_in_list(only_node, node->details->uname)) {
                     pcmk__add_word(&stopped_list, &stopped_list_len,
                                    node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         if (stopped_list != NULL) {
             out->list_item(out, state, "[ %s ]", stopped_list);
             free(stopped_list);
         }
     }
 
     out->end_list(out);
 
     return rc;
 }
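 
 /* A note on the GListPtr -> GList * changes in this patch: GListPtr is a
  * Pacemaker typedef for GList *, and spelling the pointer type out is the
  * GLib-idiomatic form, so the typedef can eventually be retired. The two
  * declarations are equivalent, e.g. (illustrative only):
  *
  *     GListPtr all = NULL;   // legacy typedef
  *     GList *all = NULL;     // preferred spelling
  */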
 
 void
 clone_free(pe_resource_t * rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         free_xml(child_rsc->xml);
         child_rsc->xml = NULL;
         /* There could be a saved unexpanded xml */
         free_xml(child_rsc->orig_xml);
         child_rsc->orig_xml = NULL;
         child_rsc->fns->free(child_rsc);
     }
 
     g_list_free(rsc->children);
 
     if (clone_data) {
         CRM_ASSERT(clone_data->demote_notify == NULL);
         CRM_ASSERT(clone_data->stop_notify == NULL);
         CRM_ASSERT(clone_data->start_notify == NULL);
         CRM_ASSERT(clone_data->promote_notify == NULL);
     }
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 clone_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
 
         if (a_role > clone_role) {
             clone_role = a_role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
     return clone_role;
 }
 
 /*!
  * \internal
  * \brief Check whether a clone has an instance for every node
  *
  * \param[in] rsc       Clone to check
  * \param[in] data_set  Cluster state
  */
 bool
 pe__is_universal_clone(pe_resource_t *rsc,
                        pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
         if (clone_data->clone_max == g_list_length(data_set->nodes)) {
             return TRUE;
         }
     }
     return FALSE;
 }
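 
 /* Illustrative sketch (not part of the original code): a clone whose
  * clone-max equals the node count is expected to run everywhere, so a
  * caller could branch on that expectation:
  *
  *     if (pe__is_universal_clone(rsc, data_set)) {
  *         // one instance per node is expected; a stopped node is notable
  *     }
  */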
 
 gboolean
 pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
     clone_variant_data_t *clone_data = NULL;
 
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
         passes = TRUE;
     } else {
         get_clone_variant_data(clone_data, rsc);
         passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child));
 
         if (!passes) {
             for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
                 pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
                 if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                     passes = TRUE;
                     break;
                 }
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index 33aa177dfb..58c9f7c9bc 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,407 +1,407 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define VARIANT_GROUP 1
 #include "./variant.h"
 
 gboolean
 group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = rsc->xml;
     xmlNode *xml_native_rsc = NULL;
     group_variant_data_t *group_data = NULL;
     const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
     const char *clone_id = NULL;
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     group_data = calloc(1, sizeof(group_variant_data_t));
     group_data->num_children = 0;
     group_data->first_child = NULL;
     group_data->last_child = NULL;
     rsc->variant_opaque = group_data;
 
     // We don't actually need the null checks, but they speed up the common case
     if ((group_ordered == NULL)
         || (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
         group_data->ordered = TRUE;
     }
     if ((group_colocated == NULL)
         || (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
         group_data->colocated = TRUE;
     }
 
     clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
 
     for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
          xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_native_rsc->name,
                          XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
             pe_resource_t *new_rsc = NULL;
 
             crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
             if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
                 pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
                 if (new_rsc != NULL && new_rsc->fns != NULL) {
                     new_rsc->fns->free(new_rsc);
                 }
                 continue;
             }
 
             group_data->num_children++;
             rsc->children = g_list_append(rsc->children, new_rsc);
 
             if (group_data->first_child == NULL) {
                 group_data->first_child = new_rsc;
             }
             group_data->last_child = new_rsc;
             pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
         }
     }
 
     if (group_data->num_children == 0) {
         pcmk__config_warn("Group %s does not have any children", rsc->id);
         return TRUE; // Allow empty groups, children can be added later
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
 
     return TRUE;
 }
 
 gboolean
 group_active(pe_resource_t * rsc, gboolean all)
 {
     gboolean c_all = TRUE;
     gboolean c_any = FALSE;
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (child_rsc->fns->active(child_rsc, all)) {
             c_any = TRUE;
         } else {
             c_all = FALSE;
         }
     }
 
     if (c_any == FALSE) {
         return FALSE;
     } else if (all && c_all == FALSE) {
         return FALSE;
     }
     return TRUE;
 }
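 
 /* Illustrative sketch: the 'all' flag selects between "every member active"
  * and "any member active" semantics:
  *
  *     gboolean fully_active = group_active(rsc, TRUE);    // all members
  *     gboolean partly_active = group_active(rsc, FALSE);  // at least one
  */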
 
 static void
 group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     GListPtr gIter = rsc->children;
     char *child_text = crm_strdup_printf("%s     ", pre_text);
 
     status_print("%s<group id=\"%s\" ", pre_text, rsc->id);
     status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</group>\n", pre_text);
     free(child_text);
 }
 
 void
 group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = NULL;
     GListPtr gIter = rsc->children;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         group_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     if (options & pe_print_brief) {
         print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
 
     } else {
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
     free(child_text);
 }
 
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
-    GListPtr gIter = rsc->children;
+    GList *gIter = rsc->children;
     char *count = crm_itoa(g_list_length(gIter));
 
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         free(count);
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             rc = pe__name_and_nvpairs_xml(out, true, "group", 4
                                           , "id", rsc->id
                                           , "number_resources", count
                                           , "managed", pe__rsc_bool_str(rsc, pe_rsc_managed)
                                           , "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)));
             free(count);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), options,
                      child_rsc, only_node, only_rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     if (options & pe_print_brief) {
-        GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
+        GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
 
         if (rscs != NULL) {
             out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id,
                             pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                             pe__resource_is_disabled(rsc) ? " (disabled)" : "");
 
             pe__rscs_brief_output(out, rscs, options, TRUE);
 
             rc = pcmk_rc_ok;
             g_list_free(rscs);
         }
 
     } else {
-        for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
+        for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
                 continue;
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id,
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                                      pe__resource_is_disabled(rsc) ? " (disabled)" : "");
 
             out->message(out, crm_map_element_name(child_rsc->xml), options,
                          child_rsc, only_node, only_rsc);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
                        (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
 
     if (options & pe_print_brief) {
-        GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
+        GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
 
         if (rscs != NULL) {
             out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id,
                             pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                             pe__resource_is_disabled(rsc) ? " (disabled)" : "");
 
             pe__rscs_brief_output(out, rscs, options, TRUE);
 
             rc = pcmk_rc_ok;
             g_list_free(rscs);
         }
 
     } else {
-        for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
+        for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
                 continue;
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id,
                                      pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
                                      pe__resource_is_disabled(rsc) ? " (disabled)" : "");
 
             out->message(out, crm_map_element_name(child_rsc->xml), options,
                          child_rsc, only_node, only_rsc);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     return rc;
 }
 
 void
 group_free(pe_resource_t * rsc)
 {
     CRM_CHECK(rsc != NULL, return);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         child_rsc->fns->free(child_rsc);
     }
 
     pe_rsc_trace(rsc, "Freeing child list");
     g_list_free(rsc->children);
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 group_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
 
         if (role > group_role) {
             group_role = role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
     return group_role;
 }
 
 gboolean
 pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     gboolean passes = FALSE;
 
     if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
         passes = TRUE;
     } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
         passes = TRUE;
     } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)) {
         passes = TRUE;
     } else {
         for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 38e935053e..193be170aa 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1376 +1,1376 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/complex.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <pe_status_private.h>
 
 #define VARIANT_NATIVE 1
 #include "./variant.h"
 
 /*!
  * \internal
  * \brief Check whether a resource is active on multiple nodes
  */
 static bool
 is_multiply_active(pe_resource_t *rsc)
 {
     unsigned int count = 0;
 
     if (rsc->variant == pe_native) {
         pe__find_active_requires(rsc, &count);
     }
     return count > 1;
 }
 
 static void
 native_priority_to_node(pe_resource_t * rsc, pe_node_t * node)
 {
     int priority = 0;
 
     if (rsc->priority == 0) {
         return;
     }
 
     if (rsc->role == RSC_ROLE_MASTER) {
         // Promoted instance takes base priority + 1
         priority = rsc->priority + 1;
 
     } else {
         priority = rsc->priority;
     }
 
     node->details->priority += priority;
     pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s)",
                  node->details->uname, node->details->priority,
                  rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
                  rsc->id, rsc->priority,
                  rsc->role == RSC_ROLE_MASTER ? " + 1" : "");
 
     /* Priority of a resource running on a guest node is added to the cluster
      * node as well. */
     if (node->details->remote_rsc
         && node->details->remote_rsc->container) {
         GListPtr gIter = node->details->remote_rsc->container->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *a_node = gIter->data;
 
             a_node->details->priority += priority;
             pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s) "
                          "from guest node '%s'",
                          a_node->details->uname, a_node->details->priority,
                          rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
                          rsc->id, rsc->priority,
                          rsc->role == RSC_ROLE_MASTER ? " + 1" : "",
                          node->details->uname);
         }
     }
 }
 
 void
 native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = rsc->running_on;
 
     CRM_CHECK(node != NULL, return);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         CRM_CHECK(a_node != NULL, return);
         if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
             return;
         }
     }
 
     pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname,
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
 
     rsc->running_on = g_list_append(rsc->running_on, node);
     if (rsc->variant == pe_native) {
         node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
 
         native_priority_to_node(rsc, node);
     }
 
     if (rsc->variant == pe_native && node->details->maintenance) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_resource_t *p = rsc->parent;
 
         pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
         resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
 
         while (p && node->details->online) {
             /* add without the additional location constraint */
             p->running_on = g_list_append(p->running_on, node);
             p = p->parent;
         }
         return;
     }
 
     if (is_multiply_active(rsc)) {
         switch (rsc->recovery_type) {
             case recovery_stop_only:
                 {
                     GHashTableIter gIter;
                     pe_node_t *local_node = NULL;
 
                     /* Ban the resource from every node so it cannot start again */
                     if (rsc->allowed_nodes != NULL) {
                         g_hash_table_destroy(rsc->allowed_nodes);
                     }
                     rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
                     g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
                     while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
                         local_node->weight = -INFINITY;
                     }
                 }
                 break;
             case recovery_stop_start:
                 break;
             case recovery_block:
                 pe__clear_resource_flags(rsc, pe_rsc_managed);
                 pe__set_resource_flags(rsc, pe_rsc_block);
 
                 /* If the resource belongs to a group or bundle configured with
                  * multiple-active=block, block the entire entity.
                  */
                 if (rsc->parent
                     && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
                     && rsc->parent->recovery_type == recovery_block) {
                     GListPtr gIter = rsc->parent->children;
 
                     for (; gIter != NULL; gIter = gIter->next) {
                         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
                         pe__clear_resource_flags(child, pe_rsc_managed);
                         pe__set_resource_flags(child, pe_rsc_block);
                     }
                 }
                 break;
         }
         crm_debug("%s is active on multiple nodes including %s: %s",
                   rsc->id, node->details->uname,
                   recovery2text(rsc->recovery_type));
 
     } else {
         pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname);
     }
 
     if (rsc->parent != NULL) {
         native_add_running(rsc->parent, node, data_set);
     }
 }
 
 static void
 recursive_clear_unique(pe_resource_t *rsc)
 {
     pe__clear_resource_flags(rsc, pe_rsc_unique);
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
 
     for (GList *child = rsc->children; child != NULL; child = child->next) {
         recursive_clear_unique((pe_resource_t *) child->data);
     }
 }
 
 gboolean
 native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_resource_t *parent = uber_parent(rsc);
     native_variant_data_t *native_data = NULL;
     const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     uint32_t ra_caps = pcmk_get_ra_caps(standard);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     native_data = calloc(1, sizeof(native_variant_data_t));
     rsc->variant_opaque = native_data;
 
     // Only some agent standards support unique and promotable clones
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
         && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
 
         /* @COMPAT We should probably reject this situation as an error (as we
          * do for promotable below) rather than warn and convert, but that
          * would be a backward-incompatible change better done with a schema
          * transform at a major version bump.
          */
         pe__force_anon(standard, parent, rsc->id, data_set);
 
         /* Clear globally-unique on the parent and all its descendants unpacked
          * so far (clearing the parent should make any future children unpacking
          * correct). We have to clear this resource explicitly because it isn't
          * hooked into the parent's children yet.
          */
         recursive_clear_unique(parent);
         recursive_clear_unique(rsc);
     }
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
         && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
 
         pe_err("Resource %s is of type %s and therefore "
                "cannot be used as a promotable clone resource",
                rsc->id, standard);
         return FALSE;
     }
     return TRUE;
 }
 
 static bool
 rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
 {
     pe_rsc_trace(rsc, "Checking whether %s is on %s",
                  rsc->id, node->details->uname);
 
     if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
 
         for (GListPtr iter = rsc->running_on; iter; iter = iter->next) {
             pe_node_t *loc = (pe_node_t *) iter->data;
 
             if (loc->details == node->details) {
                 return TRUE;
             }
         }
 
     } else if (pcmk_is_set(flags, pe_find_inactive)
                && (rsc->running_on == NULL)) {
         return TRUE;
 
     } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
                && (rsc->allocated_to->details == node->details)) {
         return TRUE;
     }
     return FALSE;
 }
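 
 /* Illustrative sketch (hypothetical caller): the flags select which
  * placement is checked, current location versus the location allocated
  * for the next transition:
  *
  *     bool now = rsc_is_on_node(rsc, node, pe_find_current);
  *     bool planned = rsc_is_on_node(rsc, node, 0);  // uses allocated_to
  */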
 
 pe_resource_t *
 native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
                 int flags)
 {
     bool match = FALSE;
     pe_resource_t *result = NULL;
 
     CRM_CHECK(id && rsc && rsc->id, return NULL);
 
     if (flags & pe_find_clone) {
         const char *rid = ID(rsc->xml);
 
         if (!pe_rsc_is_clone(uber_parent(rsc))) {
             match = FALSE;
 
         } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_casei)) {
             match = TRUE;
         }
 
     } else if (!strcmp(id, rsc->id)) {
         match = TRUE;
 
     } else if (pcmk_is_set(flags, pe_find_renamed)
                && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
         match = TRUE;
 
     } else if (pcmk_is_set(flags, pe_find_any)
                || (pcmk_is_set(flags, pe_find_anon)
                    && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
         match = pe_base_name_eq(rsc, id);
     }
 
     if (match && on_node) {
         bool match_node = rsc_is_on_node(rsc, on_node, flags);
 
         if (match_node == FALSE) {
             match = FALSE;
         }
     }
 
     if (match) {
         return rsc;
     }
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         result = rsc->fns->find_rsc(child, id, on_node, flags);
         if (result) {
             return result;
         }
     }
     return NULL;
 }
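 
 /* Illustrative sketch (resource name hypothetical): pe_find_anon matches
  * anonymous clone instances by base name, optionally restricted to
  * instances on a given node:
  *
  *     pe_resource_t *inst = native_find_rsc(clone, "ping", node,
  *                                           pe_find_anon|pe_find_current);
  */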
 
 char *
 native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                  pe_working_set_t * data_set)
 {
     char *value_copy = NULL;
     const char *value = NULL;
     GHashTable *hash = NULL;
     GHashTable *local_hash = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
     CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
 
     pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
 
     if (create || g_hash_table_size(rsc->parameters) == 0) {
         if (node != NULL) {
             pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname);
         } else {
             pe_rsc_trace(rsc, "Creating default hash");
         }
 
         local_hash = crm_str_table_new();
 
         get_rsc_attributes(local_hash, rsc, node, data_set);
 
         hash = local_hash;
     } else {
         hash = rsc->parameters;
     }
 
     value = g_hash_table_lookup(hash, name);
     if (value == NULL) {
         /* try meta attributes instead */
         value = g_hash_table_lookup(rsc->meta, name);
     }
 
     if (value != NULL) {
         value_copy = strdup(value);
     }
     if (local_hash != NULL) {
         g_hash_table_destroy(local_hash);
     }
     return value_copy;
 }
 
 gboolean
 native_active(pe_resource_t * rsc, gboolean all)
 {
     for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         if (a_node->details->unclean) {
             pe_rsc_trace(rsc, "Resource %s: node %s is unclean",
                          rsc->id, a_node->details->uname);
             return TRUE;
         } else if (a_node->details->online == FALSE) {
             pe_rsc_trace(rsc, "Resource %s: node %s is offline",
                          rsc->id, a_node->details->uname);
         } else {
             pe_rsc_trace(rsc, "Resource %s active on %s",
                          rsc->id, a_node->details->uname);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 struct print_data_s {
     long options;
     void *print_data;
 };
 
 static const char *
 native_pending_state(pe_resource_t * rsc)
 {
     const char *pending_state = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
         pending_state = "Starting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         pending_state = "Stopping";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         /* A migrate_from may still be pending, so report this as migrating too */
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
         pending_state = "Promoting";
 
     } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
         pending_state = "Demoting";
     }
 
     return pending_state;
 }
 
 static const char *
 native_pending_task(pe_resource_t * rsc)
 {
     const char *pending_task = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         pending_task = "Monitoring";
 
     /* Pending probes are not printed, even if pending
      * operations are requested. If someone ever requests that
      * behavior, uncomment this and the corresponding part of
      * unpack.c:unpack_rsc_op().
      */
     /*
     } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
         pending_task = "Checking";
     */
     }
 
     return pending_task;
 }
 
 static enum rsc_role_e
 native_displayable_role(pe_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->role;
 
     if ((role == RSC_ROLE_STARTED)
         && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
 
         role = RSC_ROLE_SLAVE;
     }
     return role;
 }
 
 static const char *
 native_displayable_state(pe_resource_t *rsc, long options)
 {
     const char *rsc_state = NULL;
 
     if (options & pe_print_pending) {
         rsc_state = native_pending_state(rsc);
     }
     if (rsc_state == NULL) {
         rsc_state = role2text(native_displayable_role(rsc));
     }
     return rsc_state;
 }
 
 static void
 native_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, options);
     const char *target_role = NULL;
 
     /* resource information. */
     status_print("%s<resource ", pre_text);
     status_print("id=\"%s\" ", rsc_printable_id(rsc));
     status_print("resource_agent=\"%s%s%s:%s\" ",
                  class,
                  prov ? "::" : "", prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     status_print("role=\"%s\" ", rsc_state);
     if (rsc->meta) {
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
     status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
     status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
 
     if (options & pe_print_pending) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             status_print("pending=\"%s\" ", pending_task);
         }
     }
 
     /* print out the nodes this resource is running on */
     if (options & pe_print_rsconly) {
         status_print("/>\n");
         /* don't list the nodes */
     } else if (rsc->running_on != NULL) {
         GListPtr gIter = rsc->running_on;
 
         status_print(">\n");
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             status_print("%s    <node name=\"%s\" id=\"%s\" cached=\"%s\"/>\n", pre_text,
                          node->details->uname, node->details->id,
                          pcmk__btoa(node->details->online == FALSE));
         }
         status_print("%s</resource>\n", pre_text);
     } else {
         status_print("/>\n");
     }
 }
 
 // Append a flag to resource description string's flags list
 static bool
 add_output_flag(GString *s, const char *flag_desc, bool have_flags)
 {
     g_string_append(s, (have_flags? ", " : " ("));
     g_string_append(s, flag_desc);
     return true;
 }
 
 // Append a node name to resource description string's node list
 static bool
 add_output_node(GString *s, const char *node, bool have_nodes)
 {
     g_string_append(s, (have_nodes? " " : " [ "));
     g_string_append(s, node);
     return true;
 }
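 
 /* Illustrative sketch: both helpers above open their delimiter on the first
  * call and rely on the caller to close it (see the " ]" and ")" appends in
  * pcmk__native_output_string() below):
  *
  *     bool have = false;
  *     have = add_output_node(s, "node1", have);  // appends " [ node1"
  *     have = add_output_node(s, "node2", have);  // appends " node2"
  *     if (have) { g_string_append(s, " ]"); }
  */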
 
 /*!
  * \internal
  * \brief Create a string description of a resource
  *
  * \param[in] rsc          Resource to describe
  * \param[in] name         Desired identifier for the resource
  * \param[in] node         If not NULL, node that resource is "on"
  * \param[in] options      Bitmask of pe_print_*
  * \param[in] target_role  Resource's target role
  * \param[in] show_nodes   Whether to display nodes when multiply active
  *
  * \return Newly allocated string description of resource
  * \note Caller must free the result with g_free().
  */
 gchar *
 pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
                            long options, const char *target_role, bool show_nodes)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *provider = NULL;
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     gchar *retval = NULL;
     GString *outstr = NULL;
     bool have_flags = false;
 
     if (rsc->variant != pe_native) {
         return NULL;
     }
 
     CRM_CHECK(name != NULL, name = "unknown");
     CRM_CHECK(kind != NULL, kind = "unknown");
     CRM_CHECK(class != NULL, class = "unknown");
 
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
         provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     }
 
     if ((node == NULL) && (rsc->lock_node != NULL)) {
         node = rsc->lock_node;
     }
     if (pcmk_is_set(options, pe_print_rsconly)
         || pcmk__list_of_multiple(rsc->running_on)) {
         node = NULL;
     }
 
     // We need a string of at least this size
     outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind)
                                 + (provider? (strlen(provider) + 2) : 0)
                                 + (node? strlen(node->details->uname) + 1 : 0)
                                 + 11);
 
     // Resource name and agent
     g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class,
                     /* @COMPAT This should be a single ':' (see CLBZ#5395) but
                      * to avoid breaking anything relying on it, we're keeping
                      * it like this until the next minor version bump.
                      */
                     (provider? "::" : ""), (provider? provider : ""), kind);
 
     // State on node
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         g_string_append(outstr, " ORPHANED");
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         enum rsc_role_e role = native_displayable_role(rsc);
 
         if (role > RSC_ROLE_SLAVE) {
             g_string_append_printf(outstr, " FAILED %s", role2text(role));
         } else {
             g_string_append(outstr, " FAILED");
         }
     } else {
         g_string_append_printf(outstr, " %s", native_displayable_state(rsc, options));
     }
     if (node) {
         g_string_append_printf(outstr, " %s", node->details->uname);
     }
 
     // Flags, as: (<flag> [...])
     if (node && !(node->details->online) && node->details->unclean) {
         have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
     }
     if (node && (node == rsc->lock_node)) {
         have_flags = add_output_flag(outstr, "LOCKED", have_flags);
     }
     if (pcmk_is_set(options, pe_print_pending)) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             have_flags = add_output_flag(outstr, pending_task, have_flags);
         }
     }
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         /* Only show the target role if it limits our abilities (i.e., ignore
          * Started, since it is the default anyway and doesn't prevent the
          * resource from becoming Master).
          */
         if (target_role_e == RSC_ROLE_STOPPED) {
             have_flags = add_output_flag(outstr, "disabled", have_flags);
 
         } else if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
                    && target_role_e == RSC_ROLE_SLAVE) {
             have_flags = add_output_flag(outstr, "target-role:", have_flags);
             g_string_append(outstr, target_role);
         }
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         have_flags = add_output_flag(outstr, "blocked", have_flags);
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         have_flags = add_output_flag(outstr, "unmanaged", have_flags);
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         have_flags = add_output_flag(outstr, "failure ignored", have_flags);
     }
     if (have_flags) {
         g_string_append(outstr, ")");
     }
 
     // User-supplied description
     if (pcmk_is_set(options, pe_print_rsconly)
         || pcmk__list_of_multiple(rsc->running_on)) {
         const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
 
         if (desc) {
             g_string_append_printf(outstr, " %s", desc);
         }
     }
 
     if (show_nodes && !pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
         bool have_nodes = false;
 
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pe_node_t *n = (pe_node_t *) iter->data;
 
             have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
         }
         if (have_nodes) {
             g_string_append(outstr, " ]");
         }
     }
 
     retval = outstr->str;
     g_string_free(outstr, FALSE);
     return retval;
 }
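 
 /* Usage sketch (mirroring the callers below): the returned string is owned
  * by the caller and must be released with g_free():
  *
  *     gchar *s = pcmk__native_output_string(rsc, rsc_printable_id(rsc),
  *                                           node, options, target_role,
  *                                           false);
  *     out->list_item(out, NULL, "%s", s);
  *     g_free(s);
  */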
 
 int
 pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc,
                        const char *name, pe_node_t *node, long options)
 {
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *target_role = NULL;
 
     xmlNodePtr list_node = NULL;
     const char *cl = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     CRM_ASSERT(kind != NULL);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         cl = "rsc-managed";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         cl = "rsc-failed";
 
     } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
         cl = "rsc-failed";
 
     } else if (pcmk__list_of_multiple(rsc->running_on)) {
         cl = "rsc-multiple";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         cl = "rsc-failure-ignored";
 
     } else {
         cl = "rsc-ok";
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, options,
                                               target_role, true);
 
         list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
         pcmk_create_html_node(list_node, "span", NULL, cl, s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 int
 pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc,
                        const char *name, pe_node_t *node, long options)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, options,
                                               target_role, true);
 
         out->list_item(out, NULL, "%s", s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 void
 common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta,
                                                       XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     if ((pre_text == NULL) && (options & pe_print_printf)) {
         pre_text = " ";
     }
 
     if (options & pe_print_html) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             status_print("<font color=\"yellow\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             status_print("<font color=\"red\">");
 
         } else if (rsc->running_on == NULL) {
             status_print("<font color=\"red\">");
 
         } else if (pcmk__list_of_multiple(rsc->running_on)) {
             status_print("<font color=\"orange\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
             status_print("<font color=\"yellow\">");
 
         } else {
             status_print("<font color=\"green\">");
         }
     }
 
     {
         gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
                                                        target_role, false);
         status_print("%s%s", (pre_text? pre_text : ""), resource_s);
         g_free(resource_s);
     }
 
 #if CURSES_ENABLED
     if (pcmk_is_set(options, pe_print_ncurses)
         && !pcmk_is_set(options, pe_print_rsconly)
         && !pcmk__list_of_multiple(rsc->running_on)) {
         /* coverity[negative_returns] False positive */
         move(-1, 0);
     }
 #endif
 
     if (pcmk_is_set(options, pe_print_html)) {
         status_print(" </font> ");
     }
 
     if (!pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
 
         GListPtr gIter = rsc->running_on;
         int counter = 0;
 
         if (options & pe_print_html) {
             status_print("<ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print("[");
         }
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *n = (pe_node_t *) gIter->data;
 
             counter++;
 
             if (options & pe_print_html) {
                 status_print("<li>\n%s", n->details->uname);
 
             } else if ((options & pe_print_printf)
                        || (options & pe_print_ncurses)) {
                 status_print(" %s", n->details->uname);
 
             } else if ((options & pe_print_log)) {
                 status_print("\t%d : %s", counter, n->details->uname);
 
             } else {
                 status_print("%s", n->details->uname);
             }
             if (options & pe_print_html) {
                 status_print("</li>\n");
 
             }
         }
 
         if (options & pe_print_html) {
             status_print("</ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print(" ]");
         }
     }
 
     if (options & pe_print_html) {
         status_print("<br/>\n");
     } else if (options & pe_print_suppres_nl) {
         /* nothing */
     } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
         status_print("\n");
     }
 }
 
 void
 native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     pe_node_t *node = NULL;
 
     CRM_ASSERT(rsc->variant == pe_native);
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     node = pe__current_node(rsc);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
 
     common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
 }
 
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_xml(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, options);
 
     long is_print_pending = options & pe_print_pending;
 
     char ra_name[LINE_MAX];
     char *nodes_running_on = NULL;
     char *priority = NULL;
     int rc = pcmk_rc_no_output;
     const char *target_role = NULL;
 
     if (rsc->meta != NULL) {
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     /* Resource information */
     snprintf(ra_name, sizeof(ra_name), "%s%s%s:%s", class, prov ? "::" : "",
              prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     nodes_running_on = crm_itoa(g_list_length(rsc->running_on));
     priority = crm_ftoa(rsc->priority);
 
     rc = pe__name_and_nvpairs_xml(out, true, "resource", 12,
              "id", rsc_printable_id(rsc),
              "resource_agent", ra_name,
              "role", rsc_state,
              "target_role", target_role,
              "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
              "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
              "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
              "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
              "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
              "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
              "nodes_running_on", nodes_running_on,
              "pending", (is_print_pending? native_pending_task(rsc) : NULL));
     free(priority);
     free(nodes_running_on);
 
     CRM_ASSERT(rc == pcmk_rc_ok);
 
     if (rsc->running_on != NULL) {
-        GListPtr gIter = rsc->running_on;
+        GList *gIter = rsc->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
                      "name", node->details->uname,
                      "id", node->details->id,
                      "cached", pcmk__btoa(node->details->online));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
     }
 
     pcmk__output_xml_pop_parent(out);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_html(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     pe_node_t *node = pe__current_node(rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, options);
 }
 
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_text(pcmk__output_t *out, va_list args)
 {
     unsigned int options = va_arg(args, unsigned int);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
-    GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     pe_node_t *node = pe__current_node(rsc);
 
     CRM_ASSERT(rsc->variant == pe_native);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, options);
 }
 
 void
 native_free(pe_resource_t * rsc)
 {
     pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
     common_free(rsc);
 }
 
 enum rsc_role_e
 native_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e role = rsc->next_role;
 
     if (current) {
         role = rsc->role;
     }
     pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
     return role;
 }
 
 /*!
  * \internal
  * \brief List nodes where a resource (or any of its children) is
  *
  * \param[in]  rsc      Resource to check
  * \param[out] list     List to add result to
  * \param[in]  current  0 = where known, 1 = running, 2 = running or pending
  *
  * \return If list contains only one node, that node
  */
 pe_node_t *
 native_location(const pe_resource_t *rsc, GList **list, int current)
 {
     pe_node_t *one = NULL;
     GListPtr result = NULL;
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             child->fns->location(child, &result, current);
         }
 
     } else if (current) {
 
         if (rsc->running_on) {
             result = g_list_copy(rsc->running_on);
         }
         if ((current == 2) && rsc->pending_node
             && !pe_find_node_id(result, rsc->pending_node->details->id)) {
                 result = g_list_append(result, rsc->pending_node);
         }
 
     } else if (current == FALSE && rsc->allocated_to) {
         result = g_list_append(NULL, rsc->allocated_to);
     }
 
     if (result && (result->next == NULL)) {
         one = result->data;
     }
 
     if (list) {
         GListPtr gIter = result;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
                 *list = g_list_append(*list, node);
             }
         }
     }
 
     g_list_free(result);
     return one;
 }
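 
 /* Illustrative sketch: collect every node where rsc is running or has a
  * pending non-probe action (current == 2); the return value is the node
  * itself only when exactly one was found:
  *
  *     GList *nodes = NULL;
  *     pe_node_t *only = native_location(rsc, &nodes, 2);
  *     g_list_free(nodes);  // node objects are not copied; free the list only
  */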
 
 static void
 get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table)
 {
     GListPtr gIter = rsc_list;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
         int offset = 0;
         char buffer[LINE_MAX];
 
         int *rsc_counter = NULL;
         int *active_counter = NULL;
 
         if (rsc->variant != pe_native) {
             continue;
         }
 
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
         if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
             const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
             offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
         }
         offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
         CRM_LOG_ASSERT(offset > 0);
 
         if (rsc_table) {
             rsc_counter = g_hash_table_lookup(rsc_table, buffer);
             if (rsc_counter == NULL) {
                 rsc_counter = calloc(1, sizeof(int));
                 *rsc_counter = 0;
                 g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
             }
             (*rsc_counter)++;
         }
 
         if (active_table) {
             GListPtr gIter2 = rsc->running_on;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 pe_node_t *node = (pe_node_t *) gIter2->data;
                 GHashTable *node_table = NULL;
 
                 if (node->details->unclean == FALSE && node->details->online == FALSE) {
                     continue;
                 }
 
                 node_table = g_hash_table_lookup(active_table, node->details->uname);
                 if (node_table == NULL) {
                     node_table = crm_str_table_new();
                     g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                 }
 
                 active_counter = g_hash_table_lookup(node_table, buffer);
                 if (active_counter == NULL) {
                     active_counter = calloc(1, sizeof(int));
                     *active_counter = 0;
                     g_hash_table_insert(node_table, strdup(buffer), active_counter);
                 }
                 (*active_counter)++;
             }
         }
     }
 }
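
 /* A note on the key format built above (hypothetical values): agent
  * standards that support providers produce keys like
  * "ocf::pacemaker:Stateful", while provider-less standards produce
  * keys like "systemd:httpd".
  */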
 
 static void
 destroy_node_table(gpointer data)
 {
     GHashTable *node_table = data;
 
     if (node_table) {
         g_hash_table_destroy(node_table);
     }
 }
 
 void
 print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options,
                  void *print_data, gboolean print_all)
 {
     GHashTable *rsc_table = crm_str_table_new();
     GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                      free, destroy_node_table);
     GHashTableIter hash_iter;
     char *type = NULL;
     int *rsc_counter = NULL;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     g_hash_table_iter_init(&hash_iter, rsc_table);
     while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             if (print_all) {
                 status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0,
                              rsc_counter ? *rsc_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             }
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
 
         if (print_all && active_counter_all == 0) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                          active_counter_all,
                          rsc_counter ? *rsc_counter : 0, type);
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
 }
 
 int
 pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all)
 {
     GHashTable *rsc_table = crm_str_table_new();
     GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                      free, destroy_node_table);
     GListPtr sorted_rscs;
     int rc = pcmk_rc_no_output;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     /* Make a sorted list of the rsc_table keys so that output order stays
      * consistent across systems.
      */
     sorted_rscs = g_hash_table_get_keys(rsc_table);
     sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
 
     for (GListPtr gIter = sorted_rscs; gIter; gIter = gIter->next) {
         char *type = (char *) gIter->data;
         int *rsc_counter = g_hash_table_lookup(rsc_table, type);
 
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (print_all) {
-                out->list_item(out, NULL, " %d/%d\t(%s):\tActive %s",
+                out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                                *active_counter,
                                rsc_counter ? *rsc_counter : 0, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             } else {
-                out->list_item(out, NULL, " %d\t(%s):\tActive %s",
+                out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                                *active_counter, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             }
 
             rc = pcmk_rc_ok;
         }
 
         if (print_all && active_counter_all == 0) {
-            out->list_item(out, NULL, " %d/%d\t(%s):\tActive",
+            out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                            active_counter_all,
                            rsc_counter ? *rsc_counter : 0, type);
             rc = pcmk_rc_ok;
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
     if (sorted_rscs) {
         g_list_free(sorted_rscs);
     }
 
     return rc;
 }
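
 /* Sketch of the brief output produced above (hypothetical values, with
  * print_all == TRUE; fields are tab-separated):
  *
  *   1/1     (ocf::pacemaker:Stateful):      Active node1
  *
  * With print_all == FALSE, only the active count is printed:
  *
  *   1       (ocf::pacemaker:Stateful):      Active node1
  */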
 
 gboolean
 pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
 {
     if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
         pcmk__str_in_list(only_rsc, rsc->id)) {
         return FALSE;
     } else if (check_parent) {
         pe_resource_t *up = uber_parent(rsc);
 
         if (pe_rsc_is_bundled(rsc)) {
             return up->parent->fns->is_filtered(up->parent, only_rsc, FALSE);
         } else {
             return up->fns->is_filtered(up, only_rsc, FALSE);
         }
     }
 
     return TRUE;
 }
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index ecb5c2cb56..5562eb6b55 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,2038 +1,2040 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/common/iso8601_internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 
 static char *
 failed_action_string(xmlNodePtr xml_op) {
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
     int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
     const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
 
     time_t last_change = 0;
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         crm_time_t *crm_when = crm_time_new(NULL);
         char *time_s = NULL;
         char *buf = NULL;
 
         crm_time_set_timet(crm_when, &last_change);
         time_s = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
 
         buf = crm_strdup_printf("%s on %s '%s' (%d): call=%s, status='%s', "
                                 "exitreason='%s', " XML_RSC_OP_LAST_CHANGE
                                 "='%s', queued=%sms, exec=%sms",
                                 op_key ? op_key : ID(xml_op),
                                 crm_element_value(xml_op, XML_ATTR_UNAME),
                                 services_ocf_exitcode_str(rc), rc,
                                 crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                 services_lrm_status_str(status),
                                 exit_reason ? exit_reason : "none",
                                 time_s,
                                 crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                                 crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
 
         crm_time_free(crm_when);
         free(time_s);
         return buf;
     } else {
         return crm_strdup_printf("%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
                                  op_key ? op_key : ID(xml_op),
                                  crm_element_value(xml_op, XML_ATTR_UNAME),
                                  services_ocf_exitcode_str(rc), rc,
                                  crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                  services_lrm_status_str(status),
                                  exit_reason ? exit_reason : "none");
     }
 }
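
 /* Sketch of the failure string built above (hypothetical values; the
  * last-rc-change timestamp is elided):
  *
  *   rsc1_monitor_10000 on node1 'not running' (7): call=22,
  *   status='complete', exitreason='none', last-rc-change='...',
  *   queued=0ms, exec=15ms
  */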
 
 static const char *
 get_cluster_stack(pe_working_set_t *data_set)
 {
     xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
                                       data_set->input, LOG_DEBUG);
     return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
 }
 
 static char *
 last_changed_string(const char *last_written, const char *user,
                     const char *client, const char *origin) {
     if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
         return crm_strdup_printf("%s%s%s%s%s%s%s",
                                  last_written ? last_written : "",
                                  user ? " by " : "",
                                  user ? user : "",
                                  client ? " via " : "",
                                  client ? client : "",
                                  origin ? " on " : "",
                                  origin ? origin : "");
     } else {
         return strdup("");
     }
 }
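
 /* Sketch of the string built above (hypothetical values); any NULL
  * argument simply drops its clause:
  *
  *   "Wed Dec  2 12:00:00 2020 by hacluster via cibadmin on node1"
  */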
 
 static char *
 op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
                   int rc, gboolean print_timing) {
     const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     char *interval_str = NULL;
     char *buf = NULL;
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *pair = pcmk_format_nvpair("interval", interval_ms_s, "ms");
         interval_str = crm_strdup_printf(" %s", pair);
         free(pair);
     }
 
     if (print_timing) {
         char *last_change_str = NULL;
         char *last_run_str = NULL;
         char *exec_str = NULL;
         char *queue_str = NULL;
 
         const char *value = NULL;
 
         time_t epoch = 0;
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
             && (epoch > 0)) {
             char *time = pcmk_format_named_time(XML_RSC_OP_LAST_CHANGE, epoch);
             last_change_str = crm_strdup_printf(" %s", time);
             free(time);
         }
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_RUN, &epoch) == pcmk_ok)
             && (epoch > 0)) {
             char *time = pcmk_format_named_time(XML_RSC_OP_LAST_RUN, epoch);
             last_run_str = crm_strdup_printf(" %s", time);
             free(time);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *pair = pcmk_format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
             exec_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *pair = pcmk_format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
             queue_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         buf = crm_strdup_printf("(%s) %s:%s%s%s%s%s rc=%d (%s)", call, task,
                                 interval_str ? interval_str : "",
                                 last_change_str ? last_change_str : "",
                                 last_run_str ? last_run_str : "",
                                 exec_str ? exec_str : "",
                                 queue_str ? queue_str : "",
                                 rc, services_ocf_exitcode_str(rc));
 
         if (last_change_str) {
             free(last_change_str);
         }
 
         if (last_run_str) {
             free(last_run_str);
         }
 
         if (exec_str) {
             free(exec_str);
         }
 
         if (queue_str) {
             free(queue_str);
         }
     } else {
         buf = crm_strdup_printf("(%s) %s%s%s", call, task,
                                 interval_str ? ":" : "",
                                 interval_str ? interval_str : "");
     }
 
     if (interval_str) {
         free(interval_str);
     }
 
     return buf;
 }
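
 /* Sketch of the strings built above (hypothetical values). With
  * print_timing:
  *
  *   (22) monitor: interval=10000ms last-rc-change='...' exec=15ms rc=0 (ok)
  *
  * and without:
  *
  *   (22) monitor: interval=10000ms
  */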
 
 static char *
 resource_history_string(pe_resource_t *rsc, const char *rsc_id, gboolean all,
                         int failcount, time_t last_failure) {
     char *buf = NULL;
 
     if (rsc == NULL) {
         buf = crm_strdup_printf("%s: orphan", rsc_id);
     } else if (all || failcount || last_failure > 0) {
         char *failcount_s = NULL;
         char *lastfail_s = NULL;
 
         if (failcount > 0) {
             failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
                                             failcount);
         } else {
             failcount_s = strdup("");
         }
         if (last_failure > 0) {
             lastfail_s = crm_strdup_printf(" %s='%s'",
                                            PCMK__LAST_FAILURE_PREFIX,
                                            pcmk__epoch2str(&last_failure));
         }
 
         buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
                                 rsc_id, rsc->migration_threshold, failcount_s,
                                 lastfail_s? lastfail_s : "");
         free(failcount_s);
         free(lastfail_s);
     } else {
         buf = crm_strdup_printf("%s:", rsc_id);
     }
 
     return buf;
 }
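
 /* Sketch of the strings built above (hypothetical values):
  *
  *   "rsc1: orphan"                      (rsc is NULL)
  *   "rsc1: migration-threshold=3 fail-count=1 last-failure='...'"
  *   "rsc1:"                             (nothing of interest to report)
  */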
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
                   "gboolean", "gboolean", "gboolean")
 int
 pe__cluster_summary(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean show_stack = va_arg(args, gboolean);
     gboolean show_dc = va_arg(args, gboolean);
     gboolean show_times = va_arg(args, gboolean);
     gboolean show_counts = va_arg(args, gboolean);
     gboolean show_options = va_arg(args, gboolean);
-    int rc = pcmk_rc_no_output;
 
+    int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (show_stack) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s);
     }
 
     /* Always print the DC section if there is no DC, even if not requested */
     if (data_set->dc_node == NULL || show_dc) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
         free(dc_name);
     }
 
     if (show_times) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-times", last_written, user, client, origin);
     }
 
     if (show_counts) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (show_options) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
         rc = pcmk_rc_ok;
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
                   "gboolean", "gboolean", "gboolean")
-int
-pe__cluster_summary_html(pcmk__output_t *out, va_list args) {
+static int
+cluster_summary_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean show_stack = va_arg(args, gboolean);
     gboolean show_dc = va_arg(args, gboolean);
     gboolean show_times = va_arg(args, gboolean);
     gboolean show_counts = va_arg(args, gboolean);
     gboolean show_options = va_arg(args, gboolean);
-    int rc = pcmk_rc_no_output;
 
+    int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (show_stack) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s);
     }
 
     /* Always print the DC section if there is no DC, even if not requested */
     if (data_set->dc_node == NULL || show_dc) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
         free(dc_name);
     }
 
     if (show_times) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-times", last_written, user, client, origin);
     }
 
     if (show_counts) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (show_options) {
         /* Kind of a hack: close the list we may have opened earlier in this
          * function so that the options can go into their own list. This is
          * done only for HTML output.
          */
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
         out->begin_list(out, NULL, NULL, "Config Options");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
         rc = pcmk_rc_ok;
     }
 
     return rc;
 }
 
 char *
 pe__node_display_name(pe_node_t *node, bool print_detail)
 {
     char *node_name;
     const char *node_host = NULL;
     const char *node_id = NULL;
     int name_len;
 
     CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
 
     /* Host is displayed only if this is a guest node */
     if (pe__is_guest_node(node)) {
         pe_node_t *host_node = pe__current_node(node->details->remote_rsc);
 
         if (host_node && host_node->details) {
             node_host = host_node->details->uname;
         }
         if (node_host == NULL) {
             node_host = ""; /* so we at least get "uname@" to indicate guest */
         }
     }
 
     /* Node ID is displayed if different from uname and detail is requested */
     if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
         node_id = node->details->id;
     }
 
     /* Determine name length */
     name_len = strlen(node->details->uname) + 1;
     if (node_host) {
         name_len += strlen(node_host) + 1; /* "@node_host" */
     }
     if (node_id) {
         name_len += strlen(node_id) + 3; /* + " (node_id)" */
     }
 
     /* Allocate and populate display name */
     node_name = malloc(name_len);
     CRM_ASSERT(node_name != NULL);
     strcpy(node_name, node->details->uname);
     if (node_host) {
         strcat(node_name, "@");
         strcat(node_name, node_host);
     }
     if (node_id) {
         strcat(node_name, " (");
         strcat(node_name, node_id);
         strcat(node_name, ")");
     }
     return node_name;
 }
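
 /* Sketch of the display-name forms built above (hypothetical values):
  *
  *   "node1"            plain cluster node
  *   "guest1@node1"     guest node, with its current host appended
  *   "node1 (101)"      detail requested and the node ID differs from uname
  */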
 
 int
 pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
                          , size_t pairs_count, ...)
 {
     xmlNodePtr xml_node = NULL;
     va_list args;
 
     CRM_ASSERT(tag_name != NULL);
 
     xml_node = pcmk__output_xml_peek_parent(out);
     CRM_ASSERT(xml_node != NULL);
     xml_node = is_list
         ? create_xml_node(xml_node, tag_name)
         : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
 
     va_start(args, pairs_count);
     while (pairs_count--) {
         const char *param_name = va_arg(args, const char *);
         const char *param_value = va_arg(args, const char *);
         if (param_name && param_value) {
             crm_xml_add(xml_node, param_name, param_value);
         }
     }
     va_end(args);
 
     if (is_list) {
         pcmk__output_xml_push_parent(out, xml_node);
     }
     return pcmk_rc_ok;
 }
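
 /* Usage sketch (hypothetical tag and attributes): emit <check status="ok"/>
  * as a plain child element, without pushing it as a new list parent:
  *
  *   pe__name_and_nvpairs_xml(out, false, "check", 1, "status", "ok");
  */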
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
-int
-pe__ban_html(pcmk__output_t *out, va_list args) {
+static int
+ban_html(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     gboolean print_clone_detail = va_arg(args, gboolean);
 
     char *node_name = pe__node_display_name(pe_node, print_clone_detail);
     char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
                                   location->id, location->rsc_lh->id,
                                   location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
                                   node_name);
 
     pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
 
     free(node_name);
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
 int
 pe__ban_text(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     gboolean print_clone_detail = va_arg(args, gboolean);
 
     char *node_name = pe__node_display_name(pe_node, print_clone_detail);
     out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
                    location->id, location->rsc_lh->id,
                    location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
                    node_name);
 
     free(node_name);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
-int
-pe__ban_xml(pcmk__output_t *out, va_list args) {
+static int
+ban_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
 
     char *weight_s = crm_itoa(pe_node->weight);
 
     pcmk__output_create_xml_node(out, "ban",
                                  "id", location->id,
                                  "resource", location->rsc_lh->id,
                                  "node", pe_node->details->uname,
                                  "weight", weight_s,
                                  "master_only", pcmk__btoa(location->role_filter == RSC_ROLE_MASTER),
                                  NULL);
 
     free(weight_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
-int
-pe__cluster_counts_html(pcmk__output_t *out, va_list args) {
-    xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
-    xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
-
+static int
+cluster_counts_html(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
+    xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
+    xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
+
     char *nnodes_str = crm_strdup_printf("%d node%s configured",
                                          nnodes, pcmk__plural_s(nnodes));
 
     pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
     free(nnodes_str);
 
     if (ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
 
         s = crm_strdup_printf(", %d ", nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else if (ndisabled && !nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
     } else if (!ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else {
         char *s = crm_strdup_printf("%d resource instance%s configured",
                                     nresources, pcmk__plural_s(nresources));
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 int
 pe__cluster_counts_text(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     out->list_item(out, NULL, "%d node%s configured",
                    nnodes, pcmk__plural_s(nnodes));
 
     if (ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED, %d BLOCKED from "
                                   "further action due to failure)",
                        nresources, pcmk__plural_s(nresources), ndisabled,
                        nblocked);
     } else if (ndisabled && !nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED)",
                        nresources, pcmk__plural_s(nresources), ndisabled);
     } else if (!ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d BLOCKED from further action "
                                   "due to failure)",
                        nresources, pcmk__plural_s(nresources), nblocked);
     } else {
         out->list_item(out, NULL, "%d resource instance%s configured",
                        nresources, pcmk__plural_s(nresources));
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
-int
-pe__cluster_counts_xml(pcmk__output_t *out, va_list args) {
-    xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
-    xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
-
+static int
+cluster_counts_xml(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
+    xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
+    xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
+
     char *s = crm_itoa(nnodes);
     crm_xml_add(nodes_node, "number", s);
     free(s);
 
     s = crm_itoa(nresources);
     crm_xml_add(resources_node, "number", s);
     free(s);
 
     s = crm_itoa(ndisabled);
     crm_xml_add(resources_node, "disabled", s);
     free(s);
 
     s = crm_itoa(nblocked);
     crm_xml_add(resources_node, "blocked", s);
     free(s);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
-int
-pe__cluster_dc_html(pcmk__output_t *out, va_list args) {
-    xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
-
+static int
+cluster_dc_html(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
 
+    xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
     pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
 
     if (dc) {
         if (crm_is_true(quorum)) {
             char *buf = crm_strdup_printf("%s (version %s) - partition with quorum",
                                           dc_name, dc_version_s ? dc_version_s : "unknown");
             pcmk_create_html_node(node, "span", NULL, NULL, buf);
             free(buf);
         } else {
             char *buf = crm_strdup_printf("%s (version %s) - partition",
                                           dc_name, dc_version_s ? dc_version_s : "unknown");
             pcmk_create_html_node(node, "span", NULL, NULL, buf);
             free(buf);
 
             pcmk_create_html_node(node, "span", NULL, "warning", "WITHOUT");
             pcmk_create_html_node(node, "span", NULL, NULL, "quorum");
         }
     } else {
         pcmk_create_html_node(node, "span", NULL, "warning", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
 int
 pe__cluster_dc_text(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
 
     if (dc) {
         out->list_item(out, "Current DC", "%s (version %s) - partition %s quorum",
                        dc_name, dc_version_s ? dc_version_s : "unknown",
                        crm_is_true(quorum) ? "with" : "WITHOUT");
     } else {
         out->list_item(out, "Current DC", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
-int
-pe__cluster_dc_xml(pcmk__output_t *out, va_list args) {
+static int
+cluster_dc_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
 
     if (dc) {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "true",
                                      "version", dc_version_s ? dc_version_s : "",
                                      "name", dc->details->uname,
                                      "id", dc->details->id,
                                      "with_quorum", pcmk__btoa(crm_is_true(quorum)),
                                      NULL);
     } else {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "false",
                                      NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
 int
 pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
     unsigned long long flags = va_arg(args, unsigned long long);
 
     if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
-        fprintf(out->dest, "\n              *** Resource management is DISABLED ***");
-        fprintf(out->dest, "\n  The cluster will not attempt to start, stop or recover services");
-        fprintf(out->dest, "\n");
+        out->info(out, "\n              *** Resource management is DISABLED ***");
+        out->info(out, "  The cluster will not attempt to start, stop or recover services");
         return pcmk_rc_ok;
     } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
-        fprintf(out->dest, "\n    *** Resource management is DISABLED ***");
-        fprintf(out->dest, "\n  The cluster will keep all resources stopped");
-        fprintf(out->dest, "\n");
+        out->info(out, "\n    *** Resource management is DISABLED ***");
+        out->info(out, "  The cluster will keep all resources stopped");
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
-int
-pe__cluster_options_html(pcmk__output_t *out, va_list args) {
+static int
+cluster_options_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will not attempt to start, stop, or recover services)");
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will keep all resources stopped)");
     } else {
         out->list_item(out, NULL, "Resource management: enabled");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
-int
-pe__cluster_options_log(pcmk__output_t *out, va_list args) {
+static int
+cluster_options_log(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         out->info(out, "Resource management is DISABLED.  The cluster will not attempt to start, stop or recover services.");
         return pcmk_rc_ok;
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         out->info(out, "Resource management is DISABLED.  The cluster has stopped all resources.");
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 int
 pe__cluster_options_text(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
-int
-pe__cluster_options_xml(pcmk__output_t *out, va_list args) {
+static int
+cluster_options_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+
     const char *no_quorum_policy = NULL;
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             no_quorum_policy = "freeze";
             break;
 
         case no_quorum_stop:
             no_quorum_policy = "stop";
             break;
 
         case no_quorum_demote:
             no_quorum_policy = "demote";
             break;
 
         case no_quorum_ignore:
             no_quorum_policy = "ignore";
             break;
 
         case no_quorum_suicide:
             no_quorum_policy = "suicide";
             break;
     }
 
     pcmk__output_create_xml_node(out, "cluster_options",
                                  "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
                                  "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
                                  "no-quorum-policy", no_quorum_policy,
                                  "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
                                  "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
                                  NULL);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
-int
-pe__cluster_stack_html(pcmk__output_t *out, va_list args) {
-    xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+static int
+cluster_stack_html(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
 
+    xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
     pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
     pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
 int
 pe__cluster_stack_text(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
+
     out->list_item(out, "Stack", "%s", stack_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
-int
-pe__cluster_stack_xml(pcmk__output_t *out, va_list args) {
+static int
+cluster_stack_xml(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, "stack",
                                  "type", stack_s,
                                  NULL);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
-int
-pe__cluster_times_html(pcmk__output_t *out, va_list args) {
-    xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
-    xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
-
+static int
+cluster_times_html(pcmk__output_t *out, va_list args) {
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
+    xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
+    xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
+
     char *buf = last_changed_string(last_written, user, client, origin);
 
     pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
     pcmk_create_html_node(updated_node, "span", NULL, NULL,
                           pcmk__epoch2str(NULL));
 
     pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
     pcmk_create_html_node(changed_node, "span", NULL, NULL, buf);
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
-int
-pe__cluster_times_xml(pcmk__output_t *out, va_list args) {
+static int
+cluster_times_xml(pcmk__output_t *out, va_list args) {
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     pcmk__output_create_xml_node(out, "last_update",
                                  "time", pcmk__epoch2str(NULL),
                                  NULL);
     pcmk__output_create_xml_node(out, "last_change",
                                  "time", last_written ? last_written : "",
                                  "user", user ? user : "",
                                  "client", client ? client : "",
                                  "origin", origin ? origin : "",
                                  NULL);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
 int
 pe__cluster_times_text(pcmk__output_t *out, va_list args) {
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     char *buf = last_changed_string(last_written, user, client, origin);
 
     out->list_item(out, "Last updated", "%s", pcmk__epoch2str(NULL));
     out->list_item(out, "Last change", " %s", buf);
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
 int
 pe__failed_action_text(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+
     char *s = failed_action_string(xml_op);
 
     out->list_item(out, NULL, "%s", s);
     free(s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
-int
-pe__failed_action_xml(pcmk__output_t *out, va_list args) {
+static int
+failed_action_xml(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
     int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
     int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
     const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
 
     char *rc_s = crm_itoa(rc);
     char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
     xmlNodePtr node = pcmk__output_create_xml_node(out, "failure",
                                                    op_key ? "op_key" : "id", op_key ? op_key : ID(xml_op),
                                                    "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                                    "exitstatus", services_ocf_exitcode_str(rc),
                                                    "exitreason", reason_s,
                                                    "exitcode", rc_s,
                                                    "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                                    "status", services_lrm_status_str(status),
                                                    NULL);
 
     if (last) {
         guint interval_ms = 0;
         char *s = NULL;
         time_t when = crm_parse_int(last, "0");
         crm_time_t *crm_when = crm_time_new(NULL);
         char *rc_change = NULL;
 
         crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
         s = crm_itoa(interval_ms);
 
         crm_time_set_timet(crm_when, &when);
         rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
 
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change,
                            "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                            "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            "interval", s,
                            "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK),
                            NULL);
 
         free(s);
         free(rc_change);
         crm_time_free(crm_when);
     }
 
     free(reason_s);
     free(rc_s);
     return pcmk_rc_ok;
 }
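
 /* Sketch of the XML element produced above (hypothetical values):
  *
  *   <failure op_key="rsc1_monitor_10000" node="node1"
  *            exitstatus="not running" exitreason="none" exitcode="7"
  *            call="22" status="complete" last-rc-change="..."
  *            queued="0" exec="15" interval="10000" task="monitor"/>
  */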
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
-                  "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
+                  "gboolean", "gboolean", "gboolean", "GList *", "GList *")
 int
 pe__node_html(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean full = va_arg(args, gboolean);
     const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean print_brief = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     char *node_name = pe__node_display_name(node, print_clone_detail);
     char *buf = crm_strdup_printf("Node: %s", node_name);
 
     if (full) {
         xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(item_node, "span", NULL, NULL, buf);
 
         if (node->details->standby_onfail && node->details->online) {
             pcmk_create_html_node(item_node, "span", NULL, "standby", " standby (on-fail)");
         } else if (node->details->standby && node->details->online) {
             char *s = crm_strdup_printf(" standby%s", node->details->running_rsc ? " (with active resources)" : "");
             pcmk_create_html_node(item_node, "span", NULL, "standby", s);
             free(s);
         } else if (node->details->standby) {
             pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (standby)");
         } else if (node->details->maintenance && node->details->online) {
             pcmk_create_html_node(item_node, "span", NULL, "maint", " maintenance");
         } else if (node->details->maintenance) {
             pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (maintenance)");
         } else if (node->details->online) {
             pcmk_create_html_node(item_node, "span", NULL, "online", " online");
         } else {
             pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE");
         }
         if (print_brief && group_by_node) {
-            GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+            GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
             if (rscs != NULL) {
                 out->begin_list(out, NULL, NULL, NULL);
                 pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
                 out->end_list(out);
             }
 
         } else if (group_by_node) {
-            GListPtr lpc2 = NULL;
+            GList *lpc2 = NULL;
 
             out->begin_list(out, NULL, NULL, NULL);
             for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
                 out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
                              rsc, only_node, only_rsc);
             }
             out->end_list(out);
         }
     } else {
         out->begin_list(out, NULL, NULL, "%s", buf);
     }
 
     free(buf);
     free(node_name);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
-                  "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
+                  "gboolean", "gboolean", "gboolean", "GList *", "GList *")
 int
 pe__node_text(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean full = va_arg(args, gboolean);
     const char *node_mode = va_arg(args, const char *);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean print_brief = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         char *node_name = pe__node_display_name(node, print_clone_detail);
         char *buf = NULL;
 
         /* Print the node name and status */
         if (pe__is_guest_node(node)) {
             buf = crm_strdup_printf("GuestNode %s: %s", node_name, node_mode);
         } else if (pe__is_remote_node(node)) {
             buf = crm_strdup_printf("RemoteNode %s: %s", node_name, node_mode);
         } else {
             buf = crm_strdup_printf("Node %s: %s", node_name, node_mode);
         }
 
         /* If we're grouping by node, print its resources */
         if (group_by_node) {
             if (print_brief) {
-                GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+                GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
                 if (rscs != NULL) {
                     out->begin_list(out, NULL, NULL, "%s", buf);
                     out->begin_list(out, NULL, NULL, "Resources");
 
                     pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
 
                     out->end_list(out);
                     out->end_list(out);
                 }
 
             } else {
-                GListPtr gIter2 = NULL;
+                GList *gIter2 = NULL;
 
                 out->begin_list(out, NULL, NULL, "%s", buf);
                 out->begin_list(out, NULL, NULL, "Resources");
 
                 for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
                     pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
                     out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
                                  rsc, only_node, only_rsc);
                 }
 
                 out->end_list(out);
                 out->end_list(out);
             }
         } else {
             out->list_item(out, NULL, "%s", buf);
         }
 
         free(buf);
         free(node_name);
     } else {
         out->begin_list(out, NULL, NULL, "Node: %s", pe__node_display_name(node, print_clone_detail));
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
-                  "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
+                  "gboolean", "gboolean", "gboolean", "GList *", "GList *")
 int
 pe__node_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean full = va_arg(args, gboolean);
     const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
     gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
     gboolean print_brief G_GNUC_UNUSED = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         const char *node_type = "unknown";
         char *length_s = crm_itoa(g_list_length(node->details->running_rsc));
 
         switch (node->details->type) {
             case node_member:
                 node_type = "member";
                 break;
             case node_remote:
                 node_type = "remote";
                 break;
             case node_ping:
                 node_type = "ping";
                 break;
         }
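+        /* The 13 below is the count of name/value pairs that follow. */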
         pe__name_and_nvpairs_xml(out, true, "node", 13,
                                  "name", node->details->uname,
                                  "id", node->details->id,
                                  "online", pcmk__btoa(node->details->online),
                                  "standby", pcmk__btoa(node->details->standby),
                                  "standby_onfail", pcmk__btoa(node->details->standby_onfail),
                                  "maintenance", pcmk__btoa(node->details->maintenance),
                                  "pending", pcmk__btoa(node->details->pending),
                                  "unclean", pcmk__btoa(node->details->unclean),
                                  "shutdown", pcmk__btoa(node->details->shutdown),
                                  "expected_up", pcmk__btoa(node->details->expected_up),
                                  "is_dc", pcmk__btoa(node->details->is_dc),
                                  "resources_running", length_s,
                                  "type", node_type);
 
         if (pe__is_guest_node(node)) {
             xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
             crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id);
         }
 
         if (group_by_node) {
-            GListPtr lpc = NULL;
+            GList *lpc = NULL;
 
             for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc->data;
                 out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
                              rsc, only_node, only_rsc);
             }
         }
 
         free(length_s);
 
         out->end_list(out);
     } else {
         pcmk__output_xml_create_parent(out, "node",
                                        "name", node->details->uname,
                                        NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
 int
 pe__node_attribute_text(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     gboolean add_extra = va_arg(args, gboolean);
     int expected_score = va_arg(args, int);
 
-
     if (add_extra) {
         int v = crm_parse_int(value, "0");
 
         if (v <= 0) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
         } else if (v < expected_score) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
         } else {
             out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
         }
     } else {
         out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
-int
-pe__node_attribute_html(pcmk__output_t *out, va_list args) {
+static int
+node_attribute_html(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     gboolean add_extra = va_arg(args, gboolean);
     int expected_score = va_arg(args, int);
 
     if (add_extra) {
         int v = crm_parse_int(value, "0");
         char *s = crm_strdup_printf("%s: %s", name, value);
         xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(item_node, "span", NULL, NULL, s);
         free(s);
 
         if (v <= 0) {
             pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
         } else if (v < expected_score) {
-            char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
+            char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d)", expected_score);
             pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
             free(buf);
         }
     } else {
         out->list_item(out, NULL, "%s: %s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
-int
-pe__node_and_op(pcmk__output_t *out, va_list args) {
+static int
+node_and_op(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     gchar *node_str = NULL;
     char *last_change_str = NULL;
 
     const char *op_rsc = crm_element_value(xml_op, "resource");
     const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int status = crm_parse_int(status_s, "0");
     time_t last_change = 0;
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         pe_node_t *node = pe__current_node(rsc);
         const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
         int opts = pe_print_rsconly | pe_print_pending;
 
         if (node == NULL) {
             node = rsc->pending_node;
         }
 
         node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
                                               opts, target_role, false);
     } else {
         node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         last_change_str = crm_strdup_printf(", %s=%s, exec=%sms",
                                             XML_RSC_OP_LAST_CHANGE,
                                             crm_strip_trailing_newline(ctime(&last_change)),
                                             crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
     }
 
     out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
                    node_str, op_key ? op_key : ID(xml_op),
                    crm_element_value(xml_op, XML_ATTR_UNAME),
                    crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                    crm_element_value(xml_op, XML_LRM_ATTR_RC),
                    last_change_str ? last_change_str : "",
                    services_lrm_status_str(status));
 
     g_free(node_str);
     free(last_change_str);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
-int
-pe__node_and_op_xml(pcmk__output_t *out, va_list args) {
+static int
+node_and_op_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     const char *op_rsc = crm_element_value(xml_op, "resource");
     const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
     const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
     int status = crm_parse_int(status_s, "0");
     time_t last_change = 0;
 
     xmlNode *node = pcmk__output_create_xml_node(out, "operation",
                                                  "op", op_key ? op_key : ID(xml_op),
                                                  "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                                  "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                                  "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
                                                  "status", services_lrm_status_str(status),
                                                  NULL);
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
         char *agent_tuple = NULL;
 
         agent_tuple = crm_strdup_printf("%s:%s:%s", class,
                                         pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "",
                                         kind);
 
         pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc),
                            "agent", agent_tuple,
                            NULL);
         free(agent_tuple);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, crm_strip_trailing_newline(ctime(&last_change)),
                            XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
-int
-pe__node_attribute_xml(pcmk__output_t *out, va_list args) {
+static int
+node_attribute_xml(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     gboolean add_extra = va_arg(args, gboolean);
     int expected_score = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute",
                                                    "name", name,
                                                    "value", value,
                                                    NULL);
 
     if (add_extra) {
         char *buf = crm_itoa(expected_score);
         crm_xml_add(node, "expected", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
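+
+/* Dispatch sketch: callers do not invoke these per-format functions
+ * directly.  They go through the message API, which selects an
+ * implementation by (message name, out->fmt_name) from the fmt_functions
+ * table at the bottom of this file, e.g.:
+ *
+ *   out->message(out, "node-attribute", name, value, add_extra, expected);
+ */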
 
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
-int
-pe__node_list_html(pcmk__output_t *out, va_list args) {
-    GListPtr nodes = va_arg(args, GListPtr);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean")
+static int
+node_list_html(pcmk__output_t *out, va_list args) {
+    GList *nodes = va_arg(args, GList *);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean print_brief = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
 
     int rc = pcmk_rc_no_output;
 
-    for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
+    for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(only_node, node->details->uname)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
 
         out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
                      print_brief, group_by_node, only_node, only_rsc);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean")
 int
 pe__node_list_text(pcmk__output_t *out, va_list args) {
-    GListPtr nodes = va_arg(args, GListPtr);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *nodes = va_arg(args, GList *);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean print_brief = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
 
     /* space-separated lists of node names */
     char *online_nodes = NULL;
     char *online_remote_nodes = NULL;
     char *online_guest_nodes = NULL;
     char *offline_nodes = NULL;
     char *offline_remote_nodes = NULL;
 
     size_t online_nodes_len = 0;
     size_t online_remote_nodes_len = 0;
     size_t online_guest_nodes_len = 0;
     size_t offline_nodes_len = 0;
     size_t offline_remote_nodes_len = 0;
 
     int rc = pcmk_rc_no_output;
 
-    for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
+    for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         const char *node_mode = NULL;
         char *node_name = pe__node_display_name(node, print_clone_detail);
 
         if (!pcmk__str_in_list(only_node, node->details->uname)) {
             free(node_name);
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
 
         /* Get node mode */
         if (node->details->unclean) {
             if (node->details->online) {
                 node_mode = "UNCLEAN (online)";
 
             } else if (node->details->pending) {
                 node_mode = "UNCLEAN (pending)";
 
             } else {
                 node_mode = "UNCLEAN (offline)";
             }
 
         } else if (node->details->pending) {
             node_mode = "pending";
 
         } else if (node->details->standby_onfail && node->details->online) {
             node_mode = "standby (on-fail)";
 
         } else if (node->details->standby) {
             if (node->details->online) {
                 if (node->details->running_rsc) {
                     node_mode = "standby (with active resources)";
                 } else {
                     node_mode = "standby";
                 }
             } else {
                 node_mode = "OFFLINE (standby)";
             }
 
         } else if (node->details->maintenance) {
             if (node->details->online) {
                 node_mode = "maintenance";
             } else {
                 node_mode = "OFFLINE (maintenance)";
             }
 
         } else if (node->details->online) {
             node_mode = "online";
             if (group_by_node == FALSE) {
                 if (pe__is_guest_node(node)) {
                     pcmk__add_word(&online_guest_nodes,
                                    &online_guest_nodes_len, node_name);
                 } else if (pe__is_remote_node(node)) {
                     pcmk__add_word(&online_remote_nodes,
                                    &online_remote_nodes_len, node_name);
                 } else {
                     pcmk__add_word(&online_nodes, &online_nodes_len, node_name);
                 }
                 free(node_name);
                 continue;
             }
 
         } else {
             node_mode = "OFFLINE";
             if (group_by_node == FALSE) {
                 if (pe__is_remote_node(node)) {
                     pcmk__add_word(&offline_remote_nodes,
                                    &offline_remote_nodes_len, node_name);
                 } else if (pe__is_guest_node(node)) {
                     /* ignore offline guest nodes */
                 } else {
                     pcmk__add_word(&offline_nodes,
                                    &offline_nodes_len, node_name);
                 }
                 free(node_name);
                 continue;
             }
         }
 
         /* If we get here, node is in bad state, or we're grouping by node */
         out->message(out, "node", node, print_opts, TRUE, node_mode, print_clone_detail,
                      print_brief, group_by_node, only_node, only_rsc);
         free(node_name);
     }
 
     /* If we're not grouping by node, summarize nodes by status */
     if (online_nodes) {
         out->list_item(out, "Online", "[ %s ]", online_nodes);
         free(online_nodes);
     }
     if (offline_nodes) {
         out->list_item(out, "OFFLINE", "[ %s ]", offline_nodes);
         free(offline_nodes);
     }
     if (online_remote_nodes) {
         out->list_item(out, "RemoteOnline", "[ %s ]", online_remote_nodes);
         free(online_remote_nodes);
     }
     if (offline_remote_nodes) {
         out->list_item(out, "RemoteOFFLINE", "[ %s ]", offline_remote_nodes);
         free(offline_remote_nodes);
     }
     if (online_guest_nodes) {
         out->list_item(out, "GuestOnline", "[ %s ]", online_guest_nodes);
         free(online_guest_nodes);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
-int
-pe__node_list_xml(pcmk__output_t *out, va_list args) {
-    GListPtr nodes = va_arg(args, GListPtr);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "unsigned int", "gboolean", "gboolean", "gboolean")
+static int
+node_list_xml(pcmk__output_t *out, va_list args) {
+    GList *nodes = va_arg(args, GList *);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean print_clone_detail = va_arg(args, gboolean);
     gboolean print_brief = va_arg(args, gboolean);
     gboolean group_by_node = va_arg(args, gboolean);
 
     out->begin_list(out, NULL, NULL, "nodes");
-    for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
+    for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(only_node, node->details->uname)) {
             continue;
         }
 
         out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
                      print_brief, group_by_node, only_node, only_rsc);
     }
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "gboolean")
 int
 pe__op_history_text(pcmk__output_t *out, va_list args) {
-    xmlNode *xml_op = va_arg(args, xmlNode *);
+    xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     gboolean print_timing = va_arg(args, gboolean);
 
     char *buf = op_history_string(xml_op, task, interval_ms_s, rc, print_timing);
 
     out->list_item(out, NULL, "%s", buf);
 
     free(buf);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
-int
-pe__op_history_xml(pcmk__output_t *out, va_list args) {
-    xmlNode *xml_op = va_arg(args, xmlNode *);
+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "gboolean")
+static int
+op_history_xml(pcmk__output_t *out, va_list args) {
+    xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     gboolean print_timing = va_arg(args, gboolean);
 
     char *rc_s = crm_itoa(rc);
     xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history",
                                                    "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                                    "task", task,
                                                    "rc", rc_s,
                                                    "rc_text", services_ocf_exitcode_str(rc),
                                                    NULL);
     free(rc_s);
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *s = crm_strdup_printf("%sms", interval_ms_s);
         crm_xml_add(node, "interval", s);
         free(s);
     }
 
     if (print_timing) {
         const char *value = NULL;
 
         value = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
         if (value) {
             time_t int_value = (time_t) crm_parse_int(value, NULL);
             if (int_value > 0) {
                 crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, pcmk__epoch2str(&int_value));
             }
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_LAST_RUN);
         if (value) {
             time_t int_value = (time_t) crm_parse_int(value, NULL);
             if (int_value > 0) {
                 crm_xml_add(node, XML_RSC_OP_LAST_RUN, pcmk__epoch2str(&int_value));
             }
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_EXEC, s);
             free(s);
         }
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_QUEUE, s);
             free(s);
         }
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "gboolean")
-int pe__resource_config(pcmk__output_t *out, va_list args) {
+static int
+resource_config(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gboolean raw = va_arg(args, gboolean);
 
     char *rsc_xml = NULL;
 
     if (raw) {
         rsc_xml = dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
     } else {
         rsc_xml = dump_xml_formatted(rsc->xml);
     }
 
     out->info(out, "Resource XML:");
     out->output_xml(out, "xml", rsc_xml);
 
     free(rsc_xml);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
 int
 pe__resource_history_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     gboolean all = va_arg(args, gboolean);
     int failcount = va_arg(args, int);
-    time_t last_failure = va_arg(args, int);
+    time_t last_failure = va_arg(args, time_t);
     gboolean as_header = va_arg(args, gboolean);
 
     char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
 
     if (as_header) {
         out->begin_list(out, NULL, NULL, "%s", buf);
     } else {
         out->list_item(out, NULL, "%s", buf);
     }
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
-int
-pe__resource_history_xml(pcmk__output_t *out, va_list args) {
+static int
+resource_history_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     gboolean all = va_arg(args, gboolean);
     int failcount = va_arg(args, int);
-    time_t last_failure = va_arg(args, int);
+    time_t last_failure = va_arg(args, time_t);
     gboolean as_header = va_arg(args, gboolean);
 
     xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history",
                                                      "id", rsc_id,
                                                      NULL);
 
     if (rsc == NULL) {
         crm_xml_add(node, "orphan", "true");
     } else if (all || failcount || last_failure > 0) {
         char *migration_s = crm_itoa(rsc->migration_threshold);
 
         pcmk__xe_set_props(node, "orphan", "false",
                            "migration-threshold", migration_s,
                            NULL);
         free(migration_s);
 
         if (failcount > 0) {
             char *s = crm_itoa(failcount);
 
             crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s);
             free(s);
         }
 
         if (last_failure > 0) {
             crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, pcmk__epoch2str(&last_failure));
         }
     }
 
     if (as_header == FALSE) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean",
-                  "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr", "gboolean")
-int
-pe__resource_list(pcmk__output_t *out, va_list args)
+                  "gboolean", "gboolean", "gboolean", "GList *", "GList *", "gboolean")
+static int
+resource_list(pcmk__output_t *out, va_list args)
 {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     unsigned int print_opts = va_arg(args, unsigned int);
     gboolean group_by_node = va_arg(args, gboolean);
     gboolean inactive_resources = va_arg(args, gboolean);
     gboolean brief_output = va_arg(args, gboolean);
     gboolean print_summary = va_arg(args, gboolean);
-    GListPtr only_node = va_arg(args, GListPtr);
-    GListPtr only_rsc = va_arg(args, GListPtr);
+    GList *only_node = va_arg(args, GList *);
+    GList *only_rsc = va_arg(args, GList *);
     gboolean print_spacer = va_arg(args, gboolean);
 
-    GListPtr rsc_iter;
+    GList *rsc_iter;
     int rc = pcmk_rc_no_output;
 
     /* If we already showed active resources by node, and
      * we're not showing inactive resources, we have nothing to do
      */
     if (group_by_node && !inactive_resources) {
         return rc;
     }
 
     PCMK__OUTPUT_SPACER_IF(out, print_spacer);
 
     if (group_by_node) {
         /* Active resources have already been printed by node */
         out->begin_list(out, NULL, NULL, "Inactive Resources");
     } else if (inactive_resources) {
         out->begin_list(out, NULL, NULL, "Full List of Resources");
     } else {
         out->begin_list(out, NULL, NULL, "Active Resources");
     }
 
     /* If we haven't already printed resources grouped by node,
      * and brief output was requested, print resource summary */
     if (brief_output && !group_by_node) {
-        GListPtr rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+        GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
 
         pe__rscs_brief_output(out, rscs, print_opts, inactive_resources);
         g_list_free(rscs);
     }
 
     /* For each resource, display it if appropriate */
     for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
         int x;
 
         /* Complex resources may have some sub-resources active and some inactive */
         gboolean is_active = rsc->fns->active(rsc, TRUE);
         gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
         /* Skip inactive orphans (deleted but still in CIB) */
         if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
             continue;
 
         /* Skip active resources if we already displayed them by node */
         } else if (group_by_node) {
             if (is_active) {
                 continue;
             }
 
         /* Skip primitives already counted in a brief summary */
         } else if (brief_output && (rsc->variant == pe_native)) {
             continue;
 
         /* Skip resources that aren't at least partially active,
          * unless we're displaying inactive resources
          */
         } else if (!partially_active && !inactive_resources) {
             continue;
 
         } else if (partially_active && !pe__rsc_running_on_any_node_in_list(rsc, only_node)) {
             continue;
         }
 
         /* Print this resource */
         x = out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc,
                          only_node, only_rsc);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     if (print_summary && rc != pcmk_rc_ok) {
         if (group_by_node) {
             out->list_item(out, NULL, "No inactive resources");
         } else if (inactive_resources) {
             out->list_item(out, NULL, "No resources");
         } else {
             out->list_item(out, NULL, "No active resources");
         }
     }
 
     out->end_list(out);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
-int
-pe__ticket_html(pcmk__output_t *out, va_list args) {
+static int
+ticket_html(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
         out->list_item(out, NULL, "%s:\t%s%s %s", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        time);
         free(time);
     } else {
         out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 int
 pe__ticket_text(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
-        out->list_item(out, ticket->id, "\t%s%s %s",
+        out->list_item(out, ticket->id, "%s%s %s",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        time);
         free(time);
     } else {
-        out->list_item(out, ticket->id, "\t%s%s",
+        out->list_item(out, ticket->id, "%s%s",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
-int
-pe__ticket_xml(pcmk__output_t *out, va_list args) {
-    xmlNodePtr node = NULL;
-
+static int
+ticket_xml(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
+    xmlNodePtr node = NULL;
+
     node = pcmk__output_create_xml_node(out, "ticket",
                                         "id", ticket->id,
                                         "status", ticket->granted ? "granted" : "revoked",
                                         "standby", pcmk__btoa(ticket->standby),
                                         NULL);
 
     if (ticket->last_granted > -1) {
         crm_xml_add(node, "last-granted", pcmk__epoch2str(&ticket->last_granted));
     }
 
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
-    { "ban", "html", pe__ban_html },
+    { "ban", "html", ban_html },
     { "ban", "log", pe__ban_text },
     { "ban", "text", pe__ban_text },
-    { "ban", "xml", pe__ban_xml },
+    { "ban", "xml", ban_xml },
     { "bundle", "xml",  pe__bundle_xml },
     { "bundle", "html",  pe__bundle_html },
     { "bundle", "text",  pe__bundle_text },
     { "bundle", "log",  pe__bundle_text },
     { "clone", "xml",  pe__clone_xml },
     { "clone", "html",  pe__clone_html },
     { "clone", "text",  pe__clone_text },
     { "clone", "log",  pe__clone_text },
-    { "cluster-counts", "html", pe__cluster_counts_html },
+    { "cluster-counts", "html", cluster_counts_html },
     { "cluster-counts", "log", pe__cluster_counts_text },
     { "cluster-counts", "text", pe__cluster_counts_text },
-    { "cluster-counts", "xml", pe__cluster_counts_xml },
-    { "cluster-dc", "html", pe__cluster_dc_html },
+    { "cluster-counts", "xml", cluster_counts_xml },
+    { "cluster-dc", "html", cluster_dc_html },
     { "cluster-dc", "log", pe__cluster_dc_text },
     { "cluster-dc", "text", pe__cluster_dc_text },
-    { "cluster-dc", "xml", pe__cluster_dc_xml },
-    { "cluster-options", "html", pe__cluster_options_html },
-    { "cluster-options", "log", pe__cluster_options_log },
+    { "cluster-dc", "xml", cluster_dc_xml },
+    { "cluster-options", "html", cluster_options_html },
+    { "cluster-options", "log", cluster_options_log },
     { "cluster-options", "text", pe__cluster_options_text },
-    { "cluster-options", "xml", pe__cluster_options_xml },
+    { "cluster-options", "xml", cluster_options_xml },
     { "cluster-summary", "default", pe__cluster_summary },
-    { "cluster-summary", "html", pe__cluster_summary_html },
-    { "cluster-stack", "html", pe__cluster_stack_html },
+    { "cluster-summary", "html", cluster_summary_html },
+    { "cluster-stack", "html", cluster_stack_html },
     { "cluster-stack", "log", pe__cluster_stack_text },
     { "cluster-stack", "text", pe__cluster_stack_text },
-    { "cluster-stack", "xml", pe__cluster_stack_xml },
-    { "cluster-times", "html", pe__cluster_times_html },
+    { "cluster-stack", "xml", cluster_stack_xml },
+    { "cluster-times", "html", cluster_times_html },
     { "cluster-times", "log", pe__cluster_times_text },
     { "cluster-times", "text", pe__cluster_times_text },
-    { "cluster-times", "xml", pe__cluster_times_xml },
+    { "cluster-times", "xml", cluster_times_xml },
     { "failed-action", "default", pe__failed_action_text },
-    { "failed-action", "xml", pe__failed_action_xml },
+    { "failed-action", "xml", failed_action_xml },
     { "group", "xml",  pe__group_xml },
     { "group", "html",  pe__group_html },
     { "group", "text",  pe__group_text },
     { "group", "log",  pe__group_text },
     { "maint-mode", "text", pe__cluster_maint_mode_text },
     { "node", "html", pe__node_html },
     { "node", "log", pe__node_text },
     { "node", "text", pe__node_text },
     { "node", "xml", pe__node_xml },
-    { "node-and-op", "default", pe__node_and_op },
-    { "node-and-op", "xml", pe__node_and_op_xml },
-    { "node-list", "html", pe__node_list_html },
+    { "node-and-op", "default", node_and_op },
+    { "node-and-op", "xml", node_and_op_xml },
+    { "node-list", "html", node_list_html },
     { "node-list", "log", pe__node_list_text },
     { "node-list", "text", pe__node_list_text },
-    { "node-list", "xml", pe__node_list_xml },
-    { "node-attribute", "html", pe__node_attribute_html },
+    { "node-list", "xml", node_list_xml },
+    { "node-attribute", "html", node_attribute_html },
     { "node-attribute", "log", pe__node_attribute_text },
     { "node-attribute", "text", pe__node_attribute_text },
-    { "node-attribute", "xml", pe__node_attribute_xml },
+    { "node-attribute", "xml", node_attribute_xml },
     { "op-history", "default", pe__op_history_text },
-    { "op-history", "xml", pe__op_history_xml },
+    { "op-history", "xml", op_history_xml },
     { "primitive", "xml",  pe__resource_xml },
     { "primitive", "html",  pe__resource_html },
     { "primitive", "text",  pe__resource_text },
     { "primitive", "log",  pe__resource_text },
-    { "resource-config", "default", pe__resource_config },
+    { "resource-config", "default", resource_config },
     { "resource-history", "default", pe__resource_history_text },
-    { "resource-history", "xml", pe__resource_history_xml },
-    { "resource-list", "default", pe__resource_list },
-    { "ticket", "html", pe__ticket_html },
+    { "resource-history", "xml", resource_history_xml },
+    { "resource-list", "default", resource_list },
+    { "ticket", "html", ticket_html },
     { "ticket", "log", pe__ticket_text },
     { "ticket", "text", pe__ticket_text },
-    { "ticket", "xml", pe__ticket_xml },
+    { "ticket", "xml", ticket_xml },
 
     { NULL, NULL, NULL }
 };
 
 void
 pe__register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
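+
+/* Registration sketch: pcmk__register_messages() walks fmt_functions and
+ * binds each entry that matches the output object's format (entries marked
+ * "default" are assumed to act as a fallback for any format).  After
+ * registration, a call such as
+ *
+ *   out->message(out, "ticket", ticket);
+ *
+ * lands in ticket_xml, ticket_html or pe__ticket_text depending on how
+ * `out` was created.
+ */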
 
 void
 pe__output_node(pe_node_t *node, gboolean details, pcmk__output_t *out)
 {
     if (node == NULL) {
         crm_trace("<NULL>");
         return;
     }
 
     CRM_ASSERT(node->details);
     crm_trace("%sNode %s: (weight=%d, fixed=%s)",
               node->details->online ? "" : "Unavailable/Unclean ",
               node->details->uname, node->weight, node->fixed ? "True" : "False");
 
     if (details) {
         char *pe_mutable = strdup("\t\t");
         GListPtr gIter = node->details->running_rsc;
         GListPtr all = NULL;
 
         all = g_list_prepend(all, strdup("*"));
 
         crm_trace("\t\t===Node Attributes");
         g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
         free(pe_mutable);
 
         crm_trace("\t\t=== Resources");
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
             out->message(out, crm_map_element_name(rsc->xml),
                          pe_print_pending, rsc, all, all);
         }
 
         g_list_free_full(all, free);
     }
 }
diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c
index 9cf28dc383..5c79dd2b77 100644
--- a/tools/crm_mon_curses.c
+++ b/tools/crm_mon_curses.c
@@ -1,452 +1,427 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <stdarg.h>
 #include <stdlib.h>
 #include <crm/crm.h>
 #include <crm/common/curses_internal.h>
 #include <crm/common/output_internal.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/pengine/internal.h>
 #include <glib.h>
 
 #include "crm_mon.h"
 
 #if CURSES_ENABLED
 
 GOptionEntry crm_mon_curses_output_entries[] = {
     { NULL }
 };
 
 typedef struct curses_list_data_s {
     unsigned int len;
     char *singular_noun;
     char *plural_noun;
 } curses_list_data_t;
 
 typedef struct private_data_s {
     GQueue *parent_q;
 } private_data_t;
 
 static void
 curses_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     g_queue_free(priv->parent_q);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 curses_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If curses_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     } else {
         out->priv = calloc(1, sizeof(private_data_t));
         if (out->priv == NULL) {
             return false;
         }
 
         priv = out->priv;
     }
 
     priv->parent_q = g_queue_new();
 
     initscr();
     cbreak();
     noecho();
 
     return true;
 }
 
 static void
 curses_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     echo();
     nocbreak();
     endwin();
 }
 
 static void
 curses_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     curses_free_priv(out);
     curses_init(out);
 }
 
 static void
 curses_subprocess_output(pcmk__output_t *out, int exit_status,
                          const char *proc_stdout, const char *proc_stderr) {
     if (proc_stdout != NULL) {
         printw("%s\n", proc_stdout);
     }
 
     if (proc_stderr != NULL) {
         printw("%s\n", proc_stderr);
     }
 
     clrtoeol();
     refresh();
 }
 
 /* curses_version is defined in curses.h, so we can't use that name here.
  * Note that this function prints out via text, not with curses.
  */
 static void
 curses_ver(pcmk__output_t *out, bool extended) {
     if (extended) {
         printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
     } else {
         printf("Pacemaker %s\n", PACEMAKER_VERSION);
         printf("Written by Andrew Beekhof\n");
     }
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 curses_error(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
-    /* Informational output does not get indented, to separate it from other
+    /* Error output does not get indented, to separate it from other
      * potentially indented list output.
      */
     va_start(ap, format);
     vw_printw(stdscr, format, ap);
     va_end(ap);
 
     /* Add a newline. */
     addch('\n');
 
     clrtoeol();
     refresh();
     sleep(2);
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 curses_info(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     /* Informational output does not get indented, to separate it from other
      * potentially indented list output.
      */
     va_start(ap, format);
     vw_printw(stdscr, format, ap);
     va_end(ap);
 
     /* Add a newline. */
     addch('\n');
 
     clrtoeol();
     refresh();
 }
 
 static void
 curses_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     private_data_t *priv = out->priv;
 
     CRM_ASSERT(priv != NULL);
     curses_indented_printf(out, "%s", buf);
 }
 
 G_GNUC_PRINTF(4, 5)
 static void
 curses_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
                   const char *format, ...) {
     private_data_t *priv = out->priv;
     curses_list_data_t *new_list = NULL;
     va_list ap;
 
     CRM_ASSERT(priv != NULL);
 
     va_start(ap, format);
 
     curses_indented_vprintf(out, format, ap);
     printw(":\n");
 
     va_end(ap);
 
     new_list = calloc(1, sizeof(curses_list_data_t));
     new_list->len = 0;
     new_list->singular_noun = singular_noun == NULL ? NULL : strdup(singular_noun);
     new_list->plural_noun = plural_noun == NULL ? NULL : strdup(plural_noun);
 
     g_queue_push_tail(priv->parent_q, new_list);
 }
 
 G_GNUC_PRINTF(3, 4)
 static void
 curses_list_item(pcmk__output_t *out, const char *id, const char *format, ...) {
     private_data_t *priv = out->priv;
     va_list ap;
 
     CRM_ASSERT(priv != NULL);
 
     va_start(ap, format);
 
     if (id != NULL) {
         curses_indented_printf(out, "%s: ", id);
         vw_printw(stdscr, format, ap);
     } else {
         curses_indented_vprintf(out, format, ap);
     }
 
     addch('\n');
     va_end(ap);
 
     out->increment_list(out);
 }
 
 static void
 curses_increment_list(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
     gpointer tail;
 
     CRM_ASSERT(priv != NULL);
     tail = g_queue_peek_tail(priv->parent_q);
     CRM_ASSERT(tail != NULL);
     ((curses_list_data_t *) tail)->len++;
 }
 
 static void
 curses_end_list(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
     curses_list_data_t *node = NULL;
 
     CRM_ASSERT(priv != NULL);
     node = g_queue_pop_tail(priv->parent_q);
 
     if (node->singular_noun != NULL && node->plural_noun != NULL) {
         if (node->len == 1) {
             curses_indented_printf(out, "%d %s found\n", node->len, node->singular_noun);
         } else {
             curses_indented_printf(out, "%d %s found\n", node->len, node->plural_noun);
         }
     }
 
     free(node);
 }
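+
+/* Sketch: when begin_list() is given nouns, end_list() reports a count,
+ * e.g. (hypothetical item):
+ *
+ *   out->begin_list(out, "resource", "resources", "Active Resources");
+ *   out->list_item(out, NULL, "%s", "dummy (ocf::pacemaker:Dummy): Started");
+ *   out->end_list(out);   // prints "1 resource found"
+ */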
 
 static bool
 curses_is_quiet(pcmk__output_t *out) {
     return out->quiet;
 }
 
 static void
 curses_spacer(pcmk__output_t *out) {
     addch('\n');
 }
 
 pcmk__output_t *
 crm_mon_mk_curses_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "console";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = curses_init;
     retval->free_priv = curses_free_priv;
     retval->finish = curses_finish;
     retval->reset = curses_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = curses_subprocess_output;
     retval->version = curses_ver;
     retval->err = curses_error;
     retval->info = curses_info;
     retval->output_xml = curses_output_xml;
 
     retval->begin_list = curses_begin_list;
     retval->list_item = curses_list_item;
     retval->increment_list = curses_increment_list;
     retval->end_list = curses_end_list;
 
     retval->is_quiet = curses_is_quiet;
     retval->spacer = curses_spacer;
 
     return retval;
 }
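+
+/* Usage sketch (error handling and cleanup omitted; assumes the caller
+ * eventually frees the object):
+ *
+ *   pcmk__output_t *out = crm_mon_mk_curses_output(argv);
+ *   out->init(out);                           // initscr(), cbreak(), noecho()
+ *   crm_mon_register_messages(out);           // bind the console formatters
+ *   out->message(out, "ticket", ticket);      // dispatch through the table
+ *   out->finish(out, CRM_EX_OK, true, NULL);  // endwin()
+ */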
 
 G_GNUC_PRINTF(2, 0)
 void
 curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     int level = 0;
     private_data_t *priv = out->priv;
 
     CRM_ASSERT(priv != NULL);
 
     level = g_queue_get_length(priv->parent_q);
 
     for (int i = 0; i < level; i++) {
         printw("  ");
     }
 
     if (level > 0) {
         printw("* ");
     }
 
     vw_printw(stdscr, format, args);
 
     clrtoeol();
     refresh();
 }
 
 G_GNUC_PRINTF(2, 3)
 void
 curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     va_start(ap, format);
     curses_indented_vprintf(out, format, ap);
     va_end(ap);
 }
 
 PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
 static int
 stonith_event_console(pcmk__output_t *out, va_list args) {
     stonith_history_t *event = va_arg(args, stonith_history_t *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean later_succeeded = va_arg(args, gboolean);
 
     crm_time_t *crm_when = crm_time_new(NULL);
     char *buf = NULL;
 
     crm_time_set_timet(crm_when, &(event->completed));
     buf = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
 
     switch (event->state) {
         case st_failed:
             curses_indented_printf(out, "%s of %s failed: delegate=%s, client=%s, origin=%s, %s='%s'%s\n",
                                    stonith_action_str(event->action), event->target,
                                    event->delegate ? event->delegate : "",
                                    event->client, event->origin,
                                    full_history ? "completed" : "last-failed", buf,
                                    later_succeeded ? " (a later attempt succeeded)" : "");
             break;
 
         case st_done:
             curses_indented_printf(out, "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'\n",
                                    stonith_action_str(event->action), event->target,
                                    event->delegate ? event->delegate : "",
                                    event->client, event->origin,
                                    full_history ? "completed" : "last-successful", buf);
             break;
 
         default:
             curses_indented_printf(out, "%s of %s pending: client=%s, origin=%s\n",
                                    stonith_action_str(event->action), event->target,
                                    event->client, event->origin);
             break;
     }
 
     free(buf);
     crm_time_free(crm_when);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
-static int
-cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
-    unsigned long long flags = va_arg(args, unsigned long long);
-    int rc;
-
-    if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
-        printw("\n              *** Resource management is DISABLED ***");
-        printw("\n  The cluster will not attempt to start, stop or recover services");
-        printw("\n");
-        rc = pcmk_rc_ok;
-    } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
-        printw("\n    *** Resource management is DISABLED ***");
-        printw("\n  The cluster will keep all resources stopped");
-        printw("\n");
-        rc = pcmk_rc_ok;
-    } else {
-        rc = pcmk_rc_no_output;
-    }
-
-    clrtoeol();
-    refresh();
-    return rc;
-}
-
 static pcmk__message_entry_t fmt_functions[] = {
     { "ban", "console", pe__ban_text },
     { "bundle", "console", pe__bundle_text },
     { "clone", "console", pe__clone_text },
     { "cluster-counts", "console", pe__cluster_counts_text },
     { "cluster-dc", "console", pe__cluster_dc_text },
     { "cluster-options", "console", pe__cluster_options_text },
     { "cluster-stack", "console", pe__cluster_stack_text },
     { "cluster-summary", "console", pe__cluster_summary },
     { "cluster-times", "console", pe__cluster_times_text },
     { "failed-action", "console", pe__failed_action_text },
-    { "failed-fencing-history", "console", stonith__failed_history },
-    { "fencing-history", "console", stonith__history },
-    { "full-fencing-history", "console", stonith__full_history },
+    { "failed-fencing-list", "console", stonith__failed_history },
+    { "fencing-list", "console", stonith__history },
+    { "full-fencing-list", "console", stonith__full_history },
     { "group", "console", pe__group_text },
-    { "maint-mode", "console", cluster_maint_mode_console },
+    { "maint-mode", "console", pe__cluster_maint_mode_text },
     { "node", "console", pe__node_text },
     { "node-attribute", "console", pe__node_attribute_text },
     { "node-list", "console", pe__node_list_text },
     { "op-history", "console", pe__op_history_text },
-    { "pending-fencing-actions", "console", stonith__pending_actions },
+    { "pending-fencing-list", "console", stonith__pending_actions },
     { "primitive", "console", pe__resource_text },
     { "resource-history", "console", pe__resource_history_text },
     { "stonith-event", "console", stonith_event_console },
     { "ticket", "console", pe__ticket_text },
 
     { NULL, NULL, NULL }
 };
 
 void
 crm_mon_register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
 
 #else
 
 pcmk__output_t *
 crm_mon_mk_curses_output(char **argv) {
     /* curses was disabled in the build, so fall back to text. */
     return pcmk__mk_text_output(argv);
 }
 
 G_GNUC_PRINTF(2, 0)
 void
 curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     return;
 }
 
 G_GNUC_PRINTF(2, 3)
 void
 curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
     return;
 }
 
 void
 crm_mon_register_messages(pcmk__output_t *out) {
     return;
 }
 
 #endif
diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c
index ce3e47c77a..06840b71e7 100644
--- a/tools/crm_mon_print.c
+++ b/tools/crm_mon_print.c
@@ -1,997 +1,997 @@
 /*
  * Copyright 2019-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <glib.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
 
 #ifndef PCMK__CONFIG_H
 #  define PCMK__CONFIG_H
 #  include <config.h>
 #endif
 
 #include <crm/cib/util.h>
 #include <crm/common/curses_internal.h>
 #include <crm/common/iso8601_internal.h>
 #include <crm/common/xml.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 #include <crm/pengine/pe_types.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/util.h>
 #include <crm/fencing/internal.h>
 
 #include "crm_mon.h"
 
 static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set,
                              pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops,
                              GListPtr op_list);
 static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                               pe_node_t *node, xmlNode *node_state, gboolean operations,
                               unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc);
 static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list,
                                const char *attrname, int *expected_score);
 static void print_node_attribute(gpointer name, gpointer user_data);
 static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                               gboolean operations, unsigned int mon_ops,
                               GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
 static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                                  gboolean print_spacer);
 static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                                unsigned int mon_ops, const char *prefix,
                                GListPtr only_rsc, gboolean print_spacer);
 static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                                  unsigned int mon_ops, GListPtr only_node,
                                  GListPtr only_rsc, gboolean print_spacer);
 static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                                 GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
 
 static GListPtr
 build_uname_list(pe_working_set_t *data_set, const char *s) {
     GListPtr unames = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         /* Nothing was given so return a list of all node names.  Or, '*' was
          * given.  This would normally fall into the pe__unames_with_tag branch
          * where it will return an empty list.  Catch it here instead.
          */
         unames = g_list_prepend(unames, strdup("*"));
     } else {
         pe_node_t *node = pe_find_node(data_set->nodes, s);
 
         if (node) {
             /* The given string was a valid uname for a node.  Return a
              * singleton list containing just that uname.
              */
             unames = g_list_prepend(unames, strdup(s));
         } else {
             /* The given string was not a valid uname.  It's either a tag or
              * it's a typo or something.  In the first case, we'll return a
              * list of all the unames of the nodes with the given tag.  In the
              * second case, we'll return a NULL pointer and nothing will
              * get displayed.
              */
             unames = pe__unames_with_tag(data_set, s);
         }
     }
 
     return unames;
 }
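+
+/* Sketch: the resulting list drives the only_node filters used throughout
+ * this file, e.g. (hypothetical node name):
+ *
+ *   GListPtr only_node = build_uname_list(data_set, "cluster01");
+ *   ...
+ *   if (!pcmk__str_in_list(only_node, node->details->uname)) {
+ *       continue;   // node filtered out of the display
+ *   }
+ */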
 
 static GListPtr
 build_rsc_list(pe_working_set_t *data_set, const char *s) {
     GListPtr resources = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         resources = g_list_prepend(resources, strdup("*"));
     } else {
         pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
                                                          pe_find_renamed|pe_find_any);
 
         if (rsc) {
             /* A colon in the name we were given means we're being asked to filter
              * on a specific instance of a cloned resource.  Put that exact string
              * into the filter list.  Otherwise, use the printable ID of whatever
              * resource was found that matches what was asked for.
              */
             if (strstr(s, ":") != NULL) {
                 resources = g_list_prepend(resources, strdup(rsc->id));
             } else {
                 resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
             }
         } else {
             /* The given string was not a valid resource name.  It's either
              * a tag or it's a typo or something.  See build_uname_list for
              * more detail.
              */
             resources = pe__rscs_with_tag(data_set, s);
         }
     }
 
     return resources;
 }
 
 static int
 failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc, time_t *last_failure) {
     return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default,
                                   NULL, data_set)
                : 0;
 }
 
 static GListPtr
 get_operation_list(xmlNode *rsc_entry) {
     GListPtr op_list = NULL;
     xmlNode *rsc_op = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
         const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(rsc_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
         int op_rc_i = crm_parse_int(op_rc, "0");
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* Ignore notifies and some probes */
         if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)
             || (pcmk__str_eq(task, "probe", pcmk__str_casei)
                 && (op_rc_i == 7))) {
             continue;
         }
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
             op_list = g_list_append(op_list, rsc_op);
         }
     }
 
     op_list = g_list_sort(op_list, sort_op_by_callid);
     return op_list;
 }
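 
 /* In short (summary of the loop above): 0-interval monitors are relabeled
  * "probe" for display, notify operations and probes that returned rc 7
  * ("not running") are dropped, and the surviving lrm_rsc_op entries are
  * returned sorted by call ID.
  */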
 
 /*!
  * \internal
  * \brief Print resource operation/failure history
  *
  * \param[in] out       The output functions structure.
  * \param[in] data_set  Cluster state to display.
  * \param[in] node      Node that ran this resource.
  * \param[in] rsc_entry Root of XML tree describing resource status.
  * \param[in] mon_ops   Bitmask of mon_op_*.
  * \param[in] op_list   A list of operations to print (consumed and freed
  *                      by this function).
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node,
                   xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     /* Print each operation */
     for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *xml_op = (xmlNode *) gIter->data;
         const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(xml_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
         int op_rc_i = crm_parse_int(op_rc, "0");
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* If this is the first printed operation, print heading for resource */
         if (rc == pcmk_rc_no_output) {
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE);
             rc = pcmk_rc_ok;
         }
 
         /* Print the operation */
         out->message(out, "op-history", xml_op, task, interval_ms_s,
                      op_rc_i, pcmk_is_set(mon_ops, mon_op_print_timing));
     }
 
     /* Free the list we created (no need to free the individual items) */
     g_list_free(op_list);
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node operation/failure history
  *
  * \param[in] out        The output functions structure.
  * \param[in] data_set   Cluster state to display.
  * \param[in] node       Node to print history for.
  * \param[in] node_state Root of XML tree describing node status.
  * \param[in] operations Whether to print operations or just failcounts.
  * \param[in] mon_ops    Bitmask of mon_op_*.
  * \param[in] only_node  List of node unames to print (or "*" for all).
  * \param[in] only_rsc   List of resource IDs to print (or "*" for all).
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
                    pe_node_t *node, xmlNode *node_state, gboolean operations,
                    unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc)
 {
     xmlNode *lrm_rsc = NULL;
     xmlNode *rsc_entry = NULL;
     int rc = pcmk_rc_no_output;
 
     lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
     lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
 
     /* Print history of each of the node's resources */
     for (rsc_entry = pcmk__xe_first_child(lrm_rsc); rsc_entry != NULL;
          rsc_entry = pcmk__xe_next(rsc_entry)) {
 
         const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
         pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
         if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
             continue;
         }
 
         /* We can't use is_filtered here to filter group resources.  For is_filtered,
          * we have to decide whether to check the parent or not.  If we check the
          * parent, all elements of a group will always be printed because that's how
          * is_filtered works for groups.  If we do not check the parent, sometimes
          * this will filter everything out.
          *
          * For other resource types, is_filtered is okay.
          */
         if (uber_parent(rsc)->variant == pe_group) {
             if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) &&
                 !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
                 continue;
             }
         } else {
             if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
                 continue;
             }
         }
 
         if (operations == FALSE) {
             time_t last_failure = 0;
             int failcount = failure_count(data_set, node, rsc, &last_failure);
 
             if (failcount <= 0) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, get_resource_display_options(mon_ops),
                              FALSE, NULL,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                              pcmk_is_set(mon_ops, mon_op_print_brief),
                              pcmk_is_set(mon_ops, mon_op_group_by_node),
                              only_node, only_rsc);
             }
 
             out->message(out, "resource-history", rsc, rsc_id, FALSE,
                          failcount, last_failure, FALSE);
         } else {
             GListPtr op_list = get_operation_list(rsc_entry);
 
             if (op_list == NULL) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, get_resource_display_options(mon_ops),
                              FALSE, NULL,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                              pcmk_is_set(mon_ops, mon_op_print_brief),
                              pcmk_is_set(mon_ops, mon_op_group_by_node),
                              only_node, only_rsc);
             }
 
             print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
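 
 /* Filtering sketch (the resource names are assumptions): with a group
  * "exim-group" containing "Public-IP" and "Email", the member "Email" is
  * printed when only_rsc lists either "Email" or "exim-group"; relying on
  * is_filtered() alone would either print all members or none.
  */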
 
 /*!
  * \internal
  * \brief Determine whether extended information about an attribute should be added.
  *
  * \param[in]  out            The output functions structure.
  * \param[in]  node           Node that ran this resource.
  * \param[in]  rsc_list       The list of resources for this node.
  * \param[in]  attrname       The attribute to find.
  * \param[out] expected_score The expected value for this attribute.
  *
  * \return TRUE if extended information should be printed, FALSE otherwise
  * \note Currently, extended information is only supported for ping/pingd
  *       resources, for which a message will be printed if connectivity is lost
  *       or degraded.
  */
 static gboolean
 add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list,
                const char *attrname, int *expected_score)
 {
     GListPtr gIter = NULL;
 
     for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         const char *type = g_hash_table_lookup(rsc->meta, "type");
         const char *name = NULL;
 
         if (rsc->children != NULL) {
             if (add_extra_info(out, node, rsc->children, attrname, expected_score)) {
                 return TRUE;
             }
         }
 
         if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
             /* Not a ping resource; keep checking the remaining resources
              * rather than giving up, so a ping resource later in the list
              * is still found.
              */
             continue;
         }
 
         name = g_hash_table_lookup(rsc->parameters, "name");
 
         if (name == NULL) {
             name = "pingd";
         }
 
         /* Check whether this resource's "name" parameter matches the
          * attribute we were asked about.
          */
         if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
             int host_list_num = 0;
             const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
             const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
 
             if (hosts) {
                 char **host_list = g_strsplit(hosts, " ", 0);
                 host_list_num = g_strv_length(host_list);
                 g_strfreev(host_list);
             }
 
             /* Default multiplier is 1, matching the ping agent's default. */
             *expected_score = host_list_num * crm_parse_int(multiplier, "1");
 
             return TRUE;
         }
     }
     return FALSE;
 }
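 
 /* Worked example (the parameter values are assumptions): a ping resource
  * with host_list="h1 h2 h3" and multiplier="1000" gives an expected score
  * of 3 * 1000 = 3000; with no multiplier configured, the default of 1
  * used here gives 3.
  */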
 
 /* structure for passing multiple pieces of user data to g_list_foreach() */
 struct mon_attr_data {
     pcmk__output_t *out;
     pe_node_t *node;
 };
 
 static void
 print_node_attribute(gpointer name, gpointer user_data)
 {
     const char *value = NULL;
     int expected_score = 0;
     gboolean add_extra = FALSE;
     struct mon_attr_data *data = (struct mon_attr_data *) user_data;
 
     value = pe_node_attribute_raw(data->node, name);
 
     add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc,
                                name, &expected_score);
 
     /* Print attribute name and value */
     data->out->message(data->out, "node-attribute", name, value, add_extra,
                        expected_score);
 }
 
 /*!
  * \internal
  * \brief Print history for all nodes.
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] operations   Whether to print operations or just failcounts.
  * \param[in] mon_ops      Bitmask of mon_op_*.
  * \param[in] only_node    List of node unames to print (or "*" for all).
  * \param[in] only_rsc     List of resource IDs to print (or "*" for all).
  * \param[in] print_spacer Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
                    gboolean operations, unsigned int mon_ops, GListPtr only_node,
                    GListPtr only_rsc, gboolean print_spacer)
 {
     xmlNode *node_state = NULL;
     xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(cib_status) == 0) {
         return rc;
     }
 
     /* Print each node in the CIB status */
     for (node_state = pcmk__xe_first_child(cib_status); node_state != NULL;
          node_state = pcmk__xe_next(node_state)) {
         pe_node_t *node;
 
         if (!pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
             continue;
         }
 
         node = pe_find_node_id(data_set->nodes, ID(node_state));
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_node, node->details->uname)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, operations ? "Operations" : "Migration Summary");
 
         print_node_history(out, data_set, node, node_state, operations, mon_ops,
                            only_node, only_rsc);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print all tickets.
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] print_spacer Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
                       gboolean print_spacer)
 {
     GHashTableIter iter;
     gpointer key, value;
 
     if (g_hash_table_size(data_set->tickets) == 0) {
         return pcmk_rc_no_output;
     }
 
     PCMK__OUTPUT_SPACER_IF(out, print_spacer);
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "Tickets");
 
     /* Print each ticket */
     g_hash_table_iter_init(&iter, data_set->tickets);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         pe_ticket_t *ticket = (pe_ticket_t *) value;
         out->message(out, "ticket", ticket);
     }
 
     /* Close section */
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Print section for negative location constraints
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] mon_ops      Bitmask of mon_op_*.
  * \param[in] prefix       ID prefix to filter results by.
  * \param[in] only_rsc     List of resource IDs to print (or "*" for all).
  * \param[in] print_spacer Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
                     unsigned int mon_ops, const char *prefix, GListPtr only_rsc,
                     gboolean print_spacer)
 {
     GListPtr gIter, gIter2;
     int rc = pcmk_rc_no_output;
 
     /* Print each ban */
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *location = gIter->data;
 
         if (prefix != NULL && !g_str_has_prefix(location->id, prefix))
             continue;
 
         if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) &&
             !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) {
             continue;
         }
 
         for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_node_t *node = (pe_node_t *) gIter2->data;
 
             if (node->weight < 0) {
                 PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
                 out->message(out, "ban", node, location,
                              pcmk_is_set(mon_ops, mon_op_print_clone_detail));
             }
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print node attributes section
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] mon_ops      Bitmask of mon_op_*.
  * \param[in] only_node    List of node unames to print (or "*" for all).
  * \param[in] only_rsc     List of resource IDs to print (or "*" for all).
  * \param[in] print_spacer Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
                       unsigned int mon_ops, GListPtr only_node,
                       GListPtr only_rsc, gboolean print_spacer)
 {
     GListPtr gIter = NULL;
     int rc = pcmk_rc_no_output;
 
     /* Unpack all resource parameters (it would be more efficient to do this
      * only when needed for the first time in add_extra_info())
      */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         crm_mon_get_parameters(gIter->data, data_set);
     }
 
     /* Display each node's attributes */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         struct mon_attr_data data;
 
         data.out = out;
         data.node = (pe_node_t *) gIter->data;
 
         if (data.node && data.node->details && data.node->details->online) {
             GList *attr_list = NULL;
             GHashTableIter iter;
             gpointer key, value;
 
             g_hash_table_iter_init(&iter, data.node->details->attrs);
             while (g_hash_table_iter_next (&iter, &key, &value)) {
                 attr_list = append_attr_list(attr_list, key);
             }
 
             if (attr_list == NULL) {
                 continue;
             }
 
             if (!pcmk__str_in_list(only_node, data.node->details->uname)) {
                 continue;
             }
 
             PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
 
             out->message(out, "node", data.node, get_resource_display_options(mon_ops),
                          FALSE, NULL,
                          pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                          pcmk_is_set(mon_ops, mon_op_print_brief),
                          pcmk_is_set(mon_ops, mon_op_group_by_node),
                          only_node, only_rsc);
             g_list_foreach(attr_list, print_node_attribute, &data);
             g_list_free(attr_list);
             out->end_list(out);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Print a section for failed actions
  *
  * \param[in] out          The output functions structure.
  * \param[in] data_set     Cluster state to display.
  * \param[in] only_node    List of node unames to print (or "*" for all).
  * \param[in] only_rsc     List of resource IDs to print (or "*" for all).
  * \param[in] print_spacer Whether to print a blank line before this section.
  *
  * \return Standard Pacemaker return code
  */
 static int
 print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
                      GListPtr only_node, GListPtr only_rsc, gboolean print_spacer)
 {
     xmlNode *xml_op = NULL;
     int rc = pcmk_rc_no_output;
 
     const char *id = NULL;
 
     if (xmlChildElementCount(data_set->failed) == 0) {
         return rc;
     }
 
     for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
         char *rsc = NULL;
 
         if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) {
             continue;
         }
 
         id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
         if (parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL) == FALSE) {
             continue;
         }
 
         if (!pcmk__str_in_list(only_rsc, rsc)) {
             free(rsc);
             continue;
         }
 
         free(rsc);
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
         out->message(out, "failed-action", xml_op);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 #define CHECK_RC(retcode, retval)   \
     if (retval == pcmk_rc_ok) {     \
         retcode = pcmk_rc_ok;       \
     }
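 
 /* CHECK_RC latches retcode to pcmk_rc_ok once any section call succeeds,
  * so later sections know whether output has already been produced (and
  * thus whether a spacer is needed).  Rough usage sketch:
  *
  *   int rc = pcmk_rc_no_output;
  *   CHECK_RC(rc, out->message(out, "cluster-summary", ...));
  *   // rc is now pcmk_rc_ok iff the call above returned pcmk_rc_ok;
  *   // the retval expression is evaluated exactly once, inside the if.
  */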
 
 /*!
  * \internal
  * \brief Top-level printing function for text/curses output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node uname or tag to limit output to.
  * \param[in] only_rsc        Resource ID or tag to limit output to.
  */
 void
 print_status(pcmk__output_t *out, pe_working_set_t *data_set,
              stonith_history_t *stonith_history, unsigned int mon_ops,
              unsigned int show, char *prefix, char *only_node, char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
 
     unsigned int print_opts = get_resource_display_options(mon_ops);
     int rc = pcmk_rc_no_output;
 
     CHECK_RC(rc, out->message(out, "cluster-summary", data_set,
                               pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                               pcmk_is_set(show, mon_show_stack),
                               pcmk_is_set(show, mon_show_dc),
                               pcmk_is_set(show, mon_show_times),
                               pcmk_is_set(show, mon_show_counts),
                               pcmk_is_set(show, mon_show_options)));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     if (pcmk_is_set(show, mon_show_nodes) && unames) {
         PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok);
         CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
                                   resources, print_opts,
                                   pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                                   pcmk_is_set(mon_ops, mon_op_print_brief),
                                   pcmk_is_set(mon_ops, mon_op_group_by_node)));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         CHECK_RC(rc, out->message(out, "resource-list", data_set, print_opts,
                                   pcmk_is_set(mon_ops, mon_op_group_by_node),
                                   pcmk_is_set(mon_ops, mon_op_inactive_resources),
                                   pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
                                   resources, rc == pcmk_rc_ok));
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, resources,
                                            rc == pcmk_rc_ok));
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         CHECK_RC(rc, print_node_summary(out, data_set,
                                         pcmk_is_set(show, mon_show_operations),
                                         mon_ops, unames, resources,
                                         (rc == pcmk_rc_ok)));
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         CHECK_RC(rc, print_failed_actions(out, data_set, unames, resources,
                                           rc == pcmk_rc_ok));
     }
 
     /* Print failed stonith actions */
     if (pcmk_is_set(show, mon_show_fence_failed)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
         stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
                                                               GINT_TO_POINTER(st_failed));
 
         if (hp) {
-            CHECK_RC(rc, out->message(out, "failed-fencing-history", stonith_history, unames,
+            CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames,
                                       pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                       rc == pcmk_rc_ok));
         }
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         CHECK_RC(rc, print_cluster_tickets(out, data_set, rc == pcmk_rc_ok));
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, resources,
                                          rc == pcmk_rc_ok));
     }
 
     /* Print stonith history */
     if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
         if (pcmk_is_set(show, mon_show_fence_worked)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
                                                                   GINT_TO_POINTER(st_failed));
 
             if (hp) {
-                CHECK_RC(rc, out->message(out, "fencing-history", hp, unames,
+                CHECK_RC(rc, out->message(out, "fencing-list", hp, unames,
                                           pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                           rc == pcmk_rc_ok));
             }
         } else if (pcmk_is_set(show, mon_show_fence_pending)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
 
             if (hp) {
-                CHECK_RC(rc, out->message(out, "pending-fencing-actions", hp, unames,
+                CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames,
                                           pcmk_is_set(mon_ops, mon_op_fence_full_history),
                                           rc == pcmk_rc_ok));
             }
         }
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for XML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] history_rc      Result of last attempt to fetch fencing history.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node uname or tag to limit output to.
  * \param[in] only_rsc        Resource ID or tag to limit output to.
  */
 void
 print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
                  crm_exit_t history_rc, stonith_history_t *stonith_history,
                  unsigned int mon_ops, unsigned int show, char *prefix,
                  char *only_node, char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
     out->message(out, "cluster-summary", data_set,
                  pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                  pcmk_is_set(show, mon_show_stack),
                  pcmk_is_set(show, mon_show_dc),
                  pcmk_is_set(show, mon_show_times),
                  pcmk_is_set(show, mon_show_counts),
                  pcmk_is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     /*** NODES ***/
     if (pcmk_is_set(show, mon_show_nodes)) {
         out->message(out, "node-list", data_set->nodes, unames,
                      resources, print_opts,
                      pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                      pcmk_is_set(mon_ops, mon_op_print_brief),
                      pcmk_is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         out->message(out, "resource-list", data_set, print_opts,
                      pcmk_is_set(mon_ops, mon_op_group_by_node),
                      pcmk_is_set(mon_ops, mon_op_inactive_resources),
                      FALSE, FALSE, unames, resources, FALSE);
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         print_node_summary(out, data_set,
                            pcmk_is_set(show, mon_show_operations),
                            mon_ops, unames, resources, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         print_failed_actions(out, data_set, unames, resources, FALSE);
     }
 
     /* Print stonith history */
     if (pcmk_is_set(show, mon_show_fencing_all)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
-        out->message(out, "full-fencing-history", history_rc, stonith_history,
+        out->message(out, "full-fencing-list", history_rc, stonith_history,
                      unames, pcmk_is_set(mon_ops, mon_op_fence_full_history),
                      FALSE);
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
 }
 
 /*!
  * \internal
  * \brief Top-level printing function for HTML output.
  *
  * \param[in] out             The output functions structure.
  * \param[in] data_set        Cluster state to display.
  * \param[in] stonith_history List of stonith actions.
  * \param[in] mon_ops         Bitmask of mon_op_*.
  * \param[in] show            Bitmask of mon_show_*.
  * \param[in] prefix          ID prefix to filter results by.
  * \param[in] only_node       Node uname or tag to limit output to.
  * \param[in] only_rsc        Resource ID or tag to limit output to.
  *
  * \return 0
  */
 int
 print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
                   stonith_history_t *stonith_history, unsigned int mon_ops,
                   unsigned int show, char *prefix, char *only_node,
                   char *only_rsc)
 {
     GListPtr unames = NULL;
     GListPtr resources = NULL;
 
     unsigned int print_opts = get_resource_display_options(mon_ops);
 
     out->message(out, "cluster-summary", data_set,
                  pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                  pcmk_is_set(show, mon_show_stack),
                  pcmk_is_set(show, mon_show_dc),
                  pcmk_is_set(show, mon_show_times),
                  pcmk_is_set(show, mon_show_counts),
                  pcmk_is_set(show, mon_show_options));
 
     unames = build_uname_list(data_set, only_node);
     resources = build_rsc_list(data_set, only_rsc);
 
     /*** NODE LIST ***/
     if (pcmk_is_set(show, mon_show_nodes) && unames) {
         out->message(out, "node-list", data_set->nodes, unames,
                      resources, print_opts,
                      pcmk_is_set(mon_ops, mon_op_print_clone_detail),
                      pcmk_is_set(mon_ops, mon_op_print_brief),
                      pcmk_is_set(mon_ops, mon_op_group_by_node));
     }
 
     /* Print resources section, if needed */
     if (pcmk_is_set(show, mon_show_resources)) {
         out->message(out, "resource-list", data_set, print_opts,
                      pcmk_is_set(mon_ops, mon_op_group_by_node),
                      pcmk_is_set(mon_ops, mon_op_inactive_resources),
                      pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
                      resources, FALSE);
     }
 
     /* print Node Attributes section if requested */
     if (pcmk_is_set(show, mon_show_attributes)) {
         print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
     }
 
     /* If requested, print resource operations (which includes failcounts)
      * or just failcounts
      */
     if (pcmk_is_set(show, mon_show_operations)
         || pcmk_is_set(show, mon_show_failcounts)) {
 
         print_node_summary(out, data_set,
                            pcmk_is_set(show, mon_show_operations),
                            mon_ops, unames, resources, FALSE);
     }
 
     /* If there were any failed actions, print them */
     if (pcmk_is_set(show, mon_show_failures)
         && xml_has_children(data_set->failed)) {
 
         print_failed_actions(out, data_set, unames, resources, FALSE);
     }
 
     /* Print failed stonith actions */
     if (pcmk_is_set(show, mon_show_fence_failed)
         && pcmk_is_set(mon_ops, mon_op_fence_history)) {
 
         stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
                                                               GINT_TO_POINTER(st_failed));
 
         if (hp) {
-            out->message(out, "failed-fencing-history", stonith_history, unames,
+            out->message(out, "failed-fencing-list", stonith_history, unames,
                          pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE);
         }
     }
 
     /* Print stonith history */
     if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
         if (pcmk_is_set(show, mon_show_fence_worked)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
                                                                   GINT_TO_POINTER(st_failed));
 
             if (hp) {
-                out->message(out, "fencing-history", hp, unames,
+                out->message(out, "fencing-list", hp, unames,
                              pcmk_is_set(mon_ops, mon_op_fence_full_history),
                              FALSE);
             }
         } else if (pcmk_is_set(show, mon_show_fence_pending)) {
             stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
 
             if (hp) {
-                out->message(out, "pending-fencing-actions", hp, unames,
+                out->message(out, "pending-fencing-list", hp, unames,
                              pcmk_is_set(mon_ops, mon_op_fence_full_history),
                              FALSE);
             }
         }
     }
 
     /* Print tickets if requested */
     if (pcmk_is_set(show, mon_show_tickets)) {
         print_cluster_tickets(out, data_set, FALSE);
     }
 
     /* Print negative location constraints if requested */
     if (pcmk_is_set(show, mon_show_bans)) {
         print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
     }
 
     g_list_free_full(unames, free);
     g_list_free_full(resources, free);
     return 0;
 }
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 2c62ff6d1c..95c72fcaab 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2037 +1,2037 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/lrmd_internal.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/lists_internal.h>
 #include <pacemaker-internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/cib/internal.h>
 
 #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
 
 enum rsc_command {
     cmd_none = 0,           // No command option given (yet)
     cmd_ban,
     cmd_cleanup,
     cmd_clear,
     cmd_colocations,
     cmd_colocations_deep,
     cmd_cts,
     cmd_delete,
     cmd_delete_param,
     cmd_execute_agent,
     cmd_fail,
     cmd_get_param,
     cmd_get_property,
     cmd_list_active_ops,
     cmd_list_agents,
     cmd_list_all_ops,
     cmd_list_alternatives,
     cmd_list_instances,
     cmd_list_providers,
     cmd_list_resources,
     cmd_list_standards,
     cmd_locate,
     cmd_metadata,
     cmd_move,
     cmd_query_raw_xml,
     cmd_query_xml,
     cmd_refresh,
     cmd_restart,
     cmd_set_param,
     cmd_set_property,
     cmd_wait,
     cmd_why,
 };
 
 struct {
     enum rsc_command rsc_cmd;   // The crm_resource command to perform
     const char *attr_set_type;
     int cib_options;
     gboolean clear_expired;
     int find_flags;             /* Flags to use when searching for resource */
     gboolean force;
     gchar *host_uname;
     gchar *interval_spec;
     gchar *move_lifetime;
     gchar *operation;
     GHashTable *override_params;
     gchar *prop_id;
     char *prop_name;
     gchar *prop_set;
     gchar *prop_value;
     gboolean recursive;
     gchar **remainder;
     gboolean require_cib;           // Whether command requires CIB connection
     gboolean require_crmd;          /* whether command requires controller connection */
     gboolean require_dataset;       /* whether command requires populated dataset instance */
     gboolean require_resource;      /* whether command requires that resource be specified */
     int resource_verbose;
     gchar *rsc_id;
     gchar *rsc_type;
     gboolean promoted_role_only;
     int timeout_ms;
     char *agent_spec;               // Standard and/or provider and/or agent
     char *v_agent;
     char *v_class;
     char *v_provider;
     gboolean validate_cmdline;      /* whether we are just validating based on command line options */
     GHashTable *validate_options;
     gchar *xml_file;
 } options = {
     .attr_set_type = XML_TAG_ATTR_SETS,
     .cib_options = cib_sync_call,
     .require_cib = TRUE,
     .require_dataset = TRUE,
     .require_resource = TRUE,
 };
 
 #if 0
 // @COMPAT @TODO enable this at next backward compatibility break
 #define SET_COMMAND(cmd) do {                                               \
         if (options.rsc_cmd != cmd_none) {                                  \
             g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,             \
                         "Only one command option may be specified");        \
             return FALSE;                                                   \
         }                                                                   \
         options.rsc_cmd = (cmd);                                            \
     } while (0)
 #else
 #define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0)
 #endif
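 
 /* Usage sketch: each command callback runs SET_COMMAND(cmd_xyz).  With the
  * @COMPAT branch above enabled, giving two command options (for example,
  * --list and --fail together) would fail with CRM_EX_USAGE; the current
  * fallback silently lets the last option win.
  */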
 
 gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
                         gpointer data, GError **error);
 gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error);
 gboolean option_cb(const gchar *option_name, const gchar *optarg,
                    gpointer data, GError **error);
 gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                               gpointer data, GError **error);
 gboolean restart_cb(const gchar *option_name, const gchar *optarg,
                     gpointer data, GError **error);
 gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 
 static crm_exit_t exit_code = CRM_EX_OK;
 static pcmk__output_t *out = NULL;
 
 // Things that should be cleaned up on exit
 static GError *error = NULL;
 static GMainLoop *mainloop = NULL;
 static cib_t *cib_conn = NULL;
 static pcmk_ipc_api_t *controld_api = NULL;
 static pe_working_set_t *data_set = NULL;
 
 #define MESSAGE_TIMEOUT_S 60
 
 #define INDENT "                                    "
 
 static pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 // Clean up and exit
 static crm_exit_t
 bye(crm_exit_t ec)
 {
     if (error != NULL) {
         if (out != NULL) {
             out->err(out, "%s: %s", g_get_prgname(), error->message);
         } else {
             fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
         }
 
         g_clear_error(&error);
     }
 
     if (out != NULL) {
         out->finish(out, ec, true, NULL);
         pcmk__output_free(out);
     }
 
     if (cib_conn != NULL) {
         cib_t *save_cib_conn = cib_conn;
 
         cib_conn = NULL; // Ensure we can't free this twice
         save_cib_conn->cmds->signoff(save_cib_conn);
         cib_delete(save_cib_conn);
     }
     if (controld_api != NULL) {
         pcmk_ipc_api_t *save_controld_api = controld_api;
 
         controld_api = NULL; // Ensure we can't free this twice
         pcmk_free_ipc_api(save_controld_api);
     }
     if (mainloop != NULL) {
         g_main_loop_unref(mainloop);
         mainloop = NULL;
     }
     pe_free_working_set(data_set);
     data_set = NULL;
     crm_exit(ec);
     return ec;
 }
 
 static void
 quit_main_loop(crm_exit_t ec)
 {
     exit_code = ec;
     if (mainloop != NULL) {
         GMainLoop *mloop = mainloop;
 
         mainloop = NULL; // Don't re-enter this block
         pcmk_quit_main_loop(mloop, 10);
         g_main_loop_unref(mloop);
     }
 }
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
     // Clear any previous error so the timeout error below replaces it
     if (error != NULL) {
         g_clear_error(&error);
     }
 
     g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
                 "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S);
 
     quit_main_loop(CRM_EX_TIMEOUT);
     return FALSE;
 }
 
 static void
 controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
                           crm_exit_t status, void *event_data, void *user_data)
 {
     switch (event_type) {
         case pcmk_ipc_event_disconnect:
             if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
                 crm_info("Connection to controller was terminated");
             }
             quit_main_loop(exit_code);
             break;
 
         case pcmk_ipc_event_reply:
             if (status != CRM_EX_OK) {
                 out->err(out, "Error: bad reply from controller: %s",
                          crm_exit_str(status));
                 pcmk_disconnect_ipc(api);
                 quit_main_loop(status);
             } else {
                 if ((pcmk_controld_api_replies_expected(api) == 0)
                     && mainloop && g_main_loop_is_running(mainloop)) {
                     out->info(out, "... got reply (done)");
                     crm_debug("Got all the replies we expected");
                     pcmk_disconnect_ipc(api);
                     quit_main_loop(CRM_EX_OK);
                 } else {
                     out->info(out, "... got reply");
                 }
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 start_mainloop(pcmk_ipc_api_t *capi)
 {
     unsigned int count = pcmk_controld_api_replies_expected(capi);
 
     if (count > 0) {
         out->info(out, "Waiting for %d %s from the controller",
                   count, pcmk__plural_alt(count, "reply", "replies"));
         exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
         mainloop = g_main_loop_new(NULL, FALSE);
         g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
         g_main_loop_run(mainloop);
     }
 }
 
 static int
 compare_id(gconstpointer a, gconstpointer b)
 {
     return strcmp((const char *)a, (const char *)b);
 }
 
 static GListPtr
 build_constraint_list(xmlNode *root)
 {
     GListPtr retval = NULL;
     xmlNode *cib_constraints = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
     int ndx = 0;
 
     cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root);
     xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
 
     for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
         xmlNode *match = getXpathResult(xpathObj, ndx);
         retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
     }
 
     freeXpathObject(xpathObj);
     return retval;
 }
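 
 /* Sketch (the constraint IDs are assumptions): for location constraints
  * with IDs "cli-ban-dummy" and "cli-prefer-dummy", this returns the sorted
  * list [ "cli-ban-dummy", "cli-prefer-dummy" ], which a caller can diff
  * before and after an operation that removes constraints.  The entries
  * point into the XML document (ID(match) is not copied), so free the list
  * itself with g_list_free(), not the strings.
  */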
 
 /* short option letters still available: eEJkKXyYZ */
 
 static GOptionEntry query_entries[] = {
     { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all cluster resources with status",
       NULL },
     { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List IDs of all instantiated resources (individual members\n"
       INDENT "rather than groups etc.)",
       NULL },
     { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       NULL,
       NULL },
     { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List active resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
       "List all resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_standards_cb,
       "List supported standards",
       NULL },
     { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       list_providers_cb,
       "List all available OCF providers",
       NULL },
     { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_agents_cb,
       "List all agents available for the named standard and/or provider",
       "STD/PROV" },
     { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       list_alternatives_cb,
       "List all available providers for the named OCF agent",
       "AGENT" },
     { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
       metadata_cb,
       "Show the metadata for the named class:provider:agent",
       "SPEC" },
     { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (after any template expansion)",
       NULL },
     { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show XML configuration of resource (before any template expansion)",
       NULL },
     { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named parameter for resource (use instance attribute\n"
       INDENT "unless --meta or --utilization is specified)",
       "PARAM" },
     { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
       "Display named property of resource ('class', 'type', or 'provider') "
       "(requires --resource)",
       "PROPERTY" },
     { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Show node(s) currently running resource",
       NULL },
     { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource\n"
       INDENT "and the resources is it colocated with",
       NULL },
     { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Display the (co)location constraints that apply to a resource",
       NULL },
     { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
       "Show why resources are not running, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry command_entries[] = {
     { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "Validate resource configuration by calling agent's validate-all\n"
       INDENT "action. The configuration may be specified either by giving an\n"
       INDENT "existing resource name with -r, or by specifying --class,\n"
       INDENT "--agent, and --provider arguments, along with any number of\n"
       INDENT "--option arguments.",
       NULL },
     { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "If resource has any past failures, clear its history and fail\n"
       INDENT "count. Optionally filtered by --resource, --node, --operation\n"
       INDENT "and --interval (otherwise all). --operation and --interval\n"
       INDENT "apply to fail counts, but entire history is always clear, to\n"
       INDENT "allow current state to be rechecked. If the named resource is\n"
       INDENT "part of a group, or one numbered instance of a clone or bundled\n"
       INDENT "resource, the clean-up applies to the whole collective resource\n"
       INDENT "unless --force is given.",
       NULL },
     { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
       "Delete resource's history (including failures) so its current state\n"
       INDENT "is rechecked. Optionally filtered by --resource and --node\n"
       INDENT "(otherwise all). If the named resource is part of a group, or one\n"
       INDENT "numbered instance of a clone or bundled resource, the refresh\n"
       INDENT "applies to the whole collective resource unless --force is given.",
       NULL },
     { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Set named parameter for resource (requires -v). Use instance\n"
       INDENT "attribute unless --meta or --utilization is specified.",
       "PARAM" },
     { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
       "Delete named parameter for resource. Use instance attribute\n"
       INDENT "unless --meta or --utilization is specified.",
       "PARAM" },
     { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
       "Set named property of resource ('class', 'type', or 'provider') "
       "(requires -r, -t, -v)",
       "PROPERTY" },
 
     { NULL }
 };
 
 static GOptionEntry location_entries[] = {
     { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to move resource. If --node is specified,\n"
       INDENT "the constraint will be to move to that node, otherwise it\n"
       INDENT "will be to ban the current node. Unless --force is specified\n"
       INDENT "this will return an error if the resource is already running\n"
       INDENT "on the specified node. If --force is specified, this will\n"
       INDENT "always ban the current node.\n"
       INDENT "Optional: --lifetime, --master. NOTE: This may prevent the\n"
       INDENT "resource from running on its previous location until the\n"
       INDENT "implicit constraint expires or is removed with --clear.",
       NULL },
     { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Create a constraint to keep resource off a node.\n"
       INDENT "Optional: --node, --lifetime, --master.\n"
       INDENT "NOTE: This will prevent the resource from running on the\n"
       INDENT "affected node until the implicit constraint expires or is\n"
       INDENT "removed with --clear. If --node is not specified, it defaults\n"
       INDENT "to the node currently running the resource for primitives\n"
       INDENT "and groups, or the master for promotable clones with\n"
       INDENT "promoted-max=1 (all other situations result in an error as\n"
       INDENT "there is no sane default).",
       NULL },
     { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
       "Remove all constraints created by the --ban and/or --move\n"
       INDENT "commands. Requires: --resource. Optional: --node, --master,\n"
       INDENT "--expired. If --node is not specified, all constraints created\n"
       INDENT "by --ban and --move will be removed for the named resource. If\n"
       INDENT "--node and --force are specified, any constraint created by\n"
       INDENT "--move will be cleared, even if it is not for the specified\n"
       INDENT "node. If --expired is specified, only those constraints whose\n"
       INDENT "lifetimes have expired will be removed.",
       NULL },
     { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
       "Modifies the --clear argument to remove constraints with\n"
       INDENT "expired lifetimes.",
       NULL },
     { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
       "Lifespan (as ISO 8601 duration) of created constraints (with\n"
       INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
       "TIMESPEC" },
     { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only,
       "Limit scope of command to Master role (with -B, -M, -U). For\n"
       INDENT "-B and -M the previous master may remain active in the Slave role.",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry advanced_entries[] = {
     { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
       "(Advanced) Delete a resource from the CIB. Required: -t",
       NULL },
     { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
       "(Advanced) Tell the cluster this resource has failed",
       NULL },
     { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
       "(Advanced) Tell the cluster to restart this resource and\n"
       INDENT "anything that depends on it",
       NULL },
     { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
       "(Advanced) Wait until the cluster settles into a stable state",
       NULL },
     { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and demote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and stop a resource on the local node",
       NULL },
     { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and start a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and promote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       validate_or_force_cb,
       "(Advanced) Bypass the cluster and check the state of a resource on\n"
       INDENT "the local node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry validate_entries[] = {
     { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
       "The standard the resource agent confirms to (for example, ocf).\n"
       INDENT "Use with --agent, --provider, --option, and --validate.",
       "CLASS" },
     { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The agent to use (for example, IPaddr). Use with --class,\n"
       INDENT "--provider, --option, and --validate.",
       "AGENT" },
     { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
       "The vendor that supplies the resource agent (for example,\n"
       INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
       "PROVIDER" },
     { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
       "Specify a device configuration parameter as NAME=VALUE (may be\n"
       INDENT "specified multiple times). Use with --validate and without the\n"
       INDENT "-r option.",
       "PARAM" },
 
     { NULL }
 };
 
 static GOptionEntry addl_entries[] = {
     { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
       "Node name",
       "NAME" },
     { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
       "Follow colocation chains when using --set-parameter",
       NULL },
     { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
       "Resource XML element (primitive, group, etc.) (with -D)",
       "ELEMENT" },
     { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
       "Value to use with -p",
       "PARAM" },
     { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource meta-attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource utilization attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
       "Operation to clear instead of all (with -C -r)",
       "OPERATION" },
     { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
       "Interval of operation to clear (default 0) (with -C -r -n)",
       "N" },
     { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
       "(Advanced) XML ID of attributes element to use (with -p, -d)",
       "ID" },
     { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
       "(Advanced) XML ID of nvpair element to use (with -p, -d)",
       "ID" },
     { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
       "(Advanced) Abort if command does not finish in this time (with\n"
       INDENT "--restart, --wait, --force-*)",
       "N" },
     { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
       "If making CIB changes, do so regardless of quorum. See help for\n"
       INDENT "individual commands for additional behavior.",
       NULL },
     { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
       NULL,
       "FILE" },
     { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
       NULL,
       "HOST" },
 
     { NULL }
 };
 
 gboolean
 agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.validate_cmdline = TRUE;
     options.require_resource = FALSE;
 
     if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
         if (options.v_provider) {
            free(options.v_provider);
         }
         options.v_provider = strdup(optarg);
     } else {
         if (options.v_agent) {
            free(options.v_agent);
         }
         options.v_agent = strdup(optarg);
     }
 
     return TRUE;
 }
 
 gboolean
 attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
         options.attr_set_type = XML_TAG_META_SETS;
     } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
         options.attr_set_type = XML_TAG_UTILIZATION;
     }
 
     return TRUE;
 }
 
 gboolean
 class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) {
         if (!out->is_quiet(out)) {
             g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
                         "Standard %s does not support parameters\n", optarg);
         }
         return FALSE;
 
     } else {
         if (options.v_class != NULL) {
             free(options.v_class);
         }
 
         options.v_class = strdup(optarg);
     }
 
     options.validate_cmdline = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
         SET_COMMAND(cmd_cleanup);
     } else {
         SET_COMMAND(cmd_refresh);
     }
 
     options.require_resource = FALSE;
     if (getenv("CIB_file") == NULL) {
         options.require_crmd = TRUE;
     }
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_dataset = FALSE;
     SET_COMMAND(cmd_delete);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.clear_expired = TRUE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 static void
 get_agent_spec(const gchar *optarg)
 {
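    /* Agent-related commands (--list-agents, --metadata, etc.) talk directly
     * to the local executor, so they need neither a CIB connection, a working
     * set, nor a resource name.
     */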
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     if (options.agent_spec != NULL) {
         free(options.agent_spec);
         options.agent_spec = NULL;
     }
     if (optarg != NULL) {
         options.agent_spec = strdup(optarg);
     }
 }
 
 gboolean
 list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                GError **error)
 {
     SET_COMMAND(cmd_list_agents);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_providers);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     SET_COMMAND(cmd_list_standards);
     options.require_cib = FALSE;
     options.require_dataset = FALSE;
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 list_alternatives_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_list_alternatives);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
             GError **error)
 {
     SET_COMMAND(cmd_metadata);
     get_agent_spec(optarg);
     return TRUE;
 }
 
 gboolean
 option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
           GError **error)
 {
     char *name = NULL;
     char *value = NULL;
 
     if (pcmk_scan_nvpair(optarg, &name, &value) != 2) {
         return FALSE;
     }
     if (options.validate_options == NULL) {
         options.validate_options = crm_str_table_new();
     }
     g_hash_table_replace(options.validate_options, name, value);
     return TRUE;
 }
 
 gboolean
 fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_crmd = TRUE;
     SET_COMMAND(cmd_fail);
     return TRUE;
 }
 
 gboolean
 flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_clear);
     } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_ban);
     } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_move);
     } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_any;
         SET_COMMAND(cmd_query_xml);
     } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_any;
         SET_COMMAND(cmd_query_raw_xml);
     } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_locate);
     } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_colocations_deep);
     } else {
         options.find_flags = pe_find_renamed|pe_find_anon;
         SET_COMMAND(cmd_colocations);
     }
 
     return TRUE;
 }
 
 gboolean
 get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
         SET_COMMAND(cmd_get_param);
     } else {
         SET_COMMAND(cmd_get_property);
     }
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
         SET_COMMAND(cmd_cts);
     } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
         SET_COMMAND(cmd_list_resources);
     } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
         SET_COMMAND(cmd_list_instances);
     } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
         SET_COMMAND(cmd_list_active_ops);
     } else {
         SET_COMMAND(cmd_list_all_ops);
     }
 
     options.require_resource = FALSE;
     return TRUE;
 }
 
 gboolean
 set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
         SET_COMMAND(cmd_set_param);
     } else {
         SET_COMMAND(cmd_delete_param);
     }
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_dataset = FALSE;
 
     if (options.prop_name) {
         free(options.prop_name);
     }
 
     options.prop_name = strdup(optarg);
     SET_COMMAND(cmd_set_property);
     options.find_flags = pe_find_renamed|pe_find_any;
     return TRUE;
 }
 
 gboolean
 timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.timeout_ms = crm_get_msec(optarg);
     return TRUE;
 }
 
 gboolean
 validate_or_force_cb(const gchar *option_name, const gchar *optarg,
                      gpointer data, GError **error)
 {
     SET_COMMAND(cmd_execute_agent);
     if (options.operation) {
         g_free(options.operation);
     }
     options.operation = g_strdup(option_name + 2); // skip "--"
     options.find_flags = pe_find_renamed|pe_find_anon;
     options.override_params = crm_str_table_new();
     return TRUE;
 }
 
 gboolean
 restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
            GError **error)
 {
     SET_COMMAND(cmd_restart);
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 gboolean
 wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     SET_COMMAND(cmd_wait);
     options.require_resource = FALSE;
     options.require_dataset = FALSE;
     return TRUE;
 }
 
 gboolean
 why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     options.require_resource = FALSE;
     SET_COMMAND(cmd_why);
     options.find_flags = pe_find_renamed|pe_find_anon;
     return TRUE;
 }
 
 static int
 ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime,
             crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     pe_node_t *current = NULL;
     unsigned int nactive = 0;
 
     CRM_CHECK(rsc != NULL, return EINVAL);
 
     current = pe__find_active_requires(rsc, &nactive);
 
     if (nactive == 1) {
         rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                               cib_conn, options.cib_options, options.promoted_role_only);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int count = 0;
         GListPtr iter = NULL;
 
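        /* The resource is not active on exactly one node. A ban is still
         * unambiguous for a promotable resource if exactly one instance is
         * promoted, so count the promoted instances and note their node.
         */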
         current = NULL;
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 count++;
                 current = pe__current_node(child);
             }
         }
 
         if(count == 1 && current) {
             rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
                                   cib_conn, options.cib_options, options.promoted_role_only);
 
         } else {
             rc = EINVAL;
             *exit_code = CRM_EX_USAGE;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "Resource '%s' not moved: active in %d locations (promoted in %d).\n"
                         "To prevent '%s' from running on a specific location, "
                         "specify a node."
                         "To prevent '%s' from being promoted at a specific "
                         "location, specify a node and the master option.",
                         options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
         }
 
     } else {
         rc = EINVAL;
         *exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                     "Resource '%s' not moved: active in %d locations.\n"
                     "To prevent '%s' from running on a specific location, "
                     "specify a node.",
                     options.rsc_id, nactive, options.rsc_id);
     }
 
     return rc;
 }
 
 static void
 cleanup(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
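    /* Without --force, clean up the topmost parent of the requested resource
     * (e.g. the whole clone rather than a single instance).
     */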
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Erasing failures of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation,
                              options.interval_spec, TRUE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
 {
     GListPtr before = NULL;
     GListPtr after = NULL;
     GListPtr remaining = NULL;
     GListPtr ele = NULL;
     pe_node_t *dest = NULL;
     int rc = pcmk_rc_ok;
 
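    /* Capture the current constraint list so that, after the clear, we can
     * diff against it and report exactly which constraints were removed.
     */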
     if (!out->is_quiet(out)) {
         before = build_constraint_list(data_set->input);
     }
 
     if (options.clear_expired) {
         rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
                                             options.rsc_id, options.host_uname,
                                             options.promoted_role_only);
 
     } else if (options.host_uname) {
         dest = pe_find_node(data_set->nodes, options.host_uname);
         if (dest == NULL) {
             rc = pcmk_rc_node_unknown;
             if (!out->is_quiet(out)) {
                 g_list_free(before);
             }
             return rc;
         }
         rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
                                 cib_conn, options.cib_options, TRUE, options.force);
 
     } else {
         rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
                                 cib_conn, options.cib_options, TRUE, options.force);
     }
 
     if (!out->is_quiet(out)) {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Could not get modified CIB: %s\n", pcmk_strerror(rc));
             g_list_free(before);
             return rc;
         }
 
         data_set->input = *cib_xml_copy;
         cluster_status(data_set);
 
         after = build_constraint_list(data_set->input);
         remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
 
         for (ele = remaining; ele != NULL; ele = ele->next) {
             out->info(out, "Removing constraint: %s", (char *) ele->data);
         }
 
         g_list_free(before);
         g_list_free(after);
         g_list_free(remaining);
     }
 
     return rc;
 }
 
 static int
 delete()
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (options.rsc_type == NULL) {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "You need to specify a resource type with -t");
         return rc;
     }
 
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
 
     rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
     return rc;
 }
 
 static int
 list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     char *provider = strchr(agent_spec, ':');
     lrmd_t *lrmd_conn = lrmd_api_new();
     lrmd_list_t *list = NULL;
 
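    // agent_spec may be "STANDARD" or "STANDARD:PROVIDER"; split it in place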
     if (provider) {
         *provider++ = 0;
     }
 
     rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
 
     if (rc > 0) {
         rc = out->message(out, "agents-list", list, agent_spec, provider);
     } else {
         rc = pcmk_rc_error;
     }
 
     if (rc != pcmk_rc_ok) {
         *exit_code = CRM_EX_NOSUCH;
         if (provider == NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No agents found for standard '%s'", agent_spec);
         } else {
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No agents found for standard '%s' and provider '%s'",
                         agent_spec, provider);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc;
     const char *text = NULL;
     lrmd_t *lrmd_conn = lrmd_api_new();
     lrmd_list_t *list = NULL;
 
     switch (options.rsc_cmd) {
         case cmd_list_alternatives:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "alternatives-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         case cmd_list_standards:
             rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "standards-list", list);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "standards";
             break;
         case cmd_list_providers:
             rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
 
             if (rc > 0) {
                 rc = out->message(out, "providers-list", list, agent_spec);
             } else {
                 rc = pcmk_rc_error;
             }
 
             text = "OCF providers";
             break;
         default:
             *exit_code = CRM_EX_SOFTWARE;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Bug");
             lrmd_api_delete(lrmd_conn);
             return pcmk_rc_error;
     }
 
     if (rc != pcmk_rc_ok) {
         if (agent_spec != NULL) {
             *exit_code = CRM_EX_NOSUCH;
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No %s found for %s", text, agent_spec);
 
         } else {
             *exit_code = CRM_EX_NOSUCH;
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "No %s found", text);
         }
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static int
 populate_working_set(xmlNodePtr *cib_xml_copy)
 {
     int rc = pcmk_rc_ok;
 
     if (options.xml_file != NULL) {
         *cib_xml_copy = filename2xml(options.xml_file);
     } else {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
     }
 
     if(rc != pcmk_rc_ok) {
         return rc;
     }
 
     /* Populate the working set instance */
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         rc = ENOMEM;
         return rc;
     }
 
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     rc = update_working_set_xml(data_set, cib_xml_copy);
     if (rc == pcmk_rc_ok) {
         cluster_status(data_set);
     }
     return rc;
 }
 
 static int
 refresh(pcmk__output_t *out)
 {
     int rc = pcmk_rc_ok;
     const char *router_node = options.host_uname;
     int attr_options = pcmk__node_attr_none;
 
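    /* Requests for a Pacemaker Remote node must be routed through the
     * cluster node currently hosting its remote connection.
     */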
     if (options.host_uname) {
         pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
 
         if (pe__is_guest_or_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 rc = ENXIO;
                 g_set_error(&error, PCMK__RC_ERROR, rc,
                             "No cluster connection to Pacemaker Remote node %s detected",
                             options.host_uname);
                 return rc;
             }
             router_node = node->details->uname;
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   options.host_uname? options.host_uname : "all nodes");
         rc = pcmk_rc_ok;
         return rc;
     }
 
     crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
 
     rc = pcmk__node_attr_request_clear(NULL, options.host_uname,
                                        NULL, NULL, NULL,
                                        NULL, attr_options);
 
     if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
                                   router_node) == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 
     return rc;
 }
 
 static void
 refresh_resource(pcmk__output_t *out, pe_resource_t *rsc)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Re-checking the state of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL,
                              0, FALSE, data_set, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, cib_conn, rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
 static int
 set_property()
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (pcmk__str_empty(options.rsc_type)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must specify -t with resource type");
         rc = ENXIO;
         return rc;
 
     } else if (pcmk__str_empty(options.prop_value)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must supply -v with new value");
         rc = EINVAL;
         return rc;
     }
 
     CRM_LOG_ASSERT(options.prop_name != NULL);
 
     msg_data = create_xml_node(NULL, options.rsc_type);
     crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
     crm_xml_add(msg_data, options.prop_name, options.prop_value);
 
     rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
                                 options.cib_options);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
 
     return rc;
 }
 
 static int
 show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
 {
     int rc = pcmk_rc_ok;
     char *standard = NULL;
     char *provider = NULL;
     char *type = NULL;
     char *metadata = NULL;
     lrmd_t *lrmd_conn = lrmd_api_new();
 
     rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
     rc = pcmk_legacy2rc(rc);
 
     if (rc == pcmk_rc_ok) {
         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                            provider, type,
                                            &metadata, 0);
         rc = pcmk_legacy2rc(rc);
 
         if (metadata) {
             out->output_xml(out, "metadata", metadata);
         } else {
             *exit_code = crm_errno2exit(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
                         "Metadata query for %s failed: %s",
                         agent_spec, pcmk_rc_str(rc));
         }
     } else {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "'%s' is not a valid agent specification", agent_spec);
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
 static void
 validate_cmdline(crm_exit_t *exit_code)
 {
     // -r cannot be used with any of --class, --agent, or --provider
     if (options.rsc_id != NULL) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--resource cannot be used with --class, --agent, and --provider");
 
     // If --class, --agent, or --provider are given, --validate must also be given.
     } else if (options.rsc_cmd != cmd_execute_agent) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "--class, --agent, and --provider require --validate");
 
     // Not all of --class, --agent, and --provider need to be given.  Not all
     // classes support the concept of a provider.  Check that what we were given
     // is valid.
     } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
         if (options.v_provider != NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "stonith does not support providers");
 
         } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         "%s is not a known stonith agent", options.v_agent ? options.v_agent : "");
         }
 
     } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "%s:%s:%s is not a known resource",
                     options.v_class ? options.v_class : "",
                     options.v_provider ? options.v_provider : "",
                     options.v_agent ? options.v_agent : "");
     }
 
     if (error == NULL) {
         if (options.validate_options == NULL) {
             options.validate_options = crm_str_table_new();
         }
         *exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent,
                                                       "validate-all", options.validate_options,
                                                       options.override_params, options.timeout_ms,
                                                       options.resource_verbose, options.force);
     }
 }
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
         { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
           "Resource ID",
           "ID" },
         { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
           NULL,
           NULL },
 
         { NULL }
     };
 
     const char *description = "Examples:\n\n"
                               "List the available OCF agents:\n\n"
                               "\t# crm_resource --list-agents ocf\n\n"
                               "List the available OCF agents from the linux-ha project:\n\n"
                               "\t# crm_resource --list-agents ocf:heartbeat\n\n"
                               "Move 'myResource' to a specific node:\n\n"
                               "\t# crm_resource --resource myResource --move --node altNode\n\n"
                               "Allow (but not force) 'myResource' to move back to its original "
                               "location:\n\n"
                               "\t# crm_resource --resource myResource --clear\n\n"
                               "Stop 'myResource' (and anything that depends on it):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter target-role "
                               "--meta --parameter-value Stopped\n\n"
                               "Tell the cluster not to manage 'myResource' (the cluster will not "
                               "attempt to start or stop the\n"
                               "resource under any circumstances; useful when performing maintenance "
                               "tasks on a resource):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter is-managed "
                               "--meta --parameter-value false\n\n"
                               "Erase the operation history of 'myResource' on 'aNode' (the cluster "
                               "will 'forget' the existing\n"
                               "resource state, including any errors, and attempt to recover the"
                               "resource; useful when a resource\n"
                               "had failed permanently and has been repaired by an administrator):\n\n"
                               "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     /* Add the -Q option, which cannot be part of the globally supported options
      * because some tools use that flag for something else.
      */
     pcmk__add_main_args(context, extra_prog_entries);
 
     pcmk__add_arg_group(context, "queries", "Queries:",
                         "Show query help", query_entries);
     pcmk__add_arg_group(context, "commands", "Commands:",
                         "Show command help", command_entries);
     pcmk__add_arg_group(context, "locations", "Locations:",
                         "Show location help", location_entries);
     pcmk__add_arg_group(context, "validate", "Validate:",
                         "Show validate help", validate_entries);
     pcmk__add_arg_group(context, "advanced", "Advanced:",
                         "Show advanced option help", advanced_entries);
     pcmk__add_arg_group(context, "additional", "Additional Options:",
                         "Show additional options", addl_entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     xmlNode *cib_xml_copy = NULL;
     pe_resource_t *rsc = NULL;
 
     int rc = pcmk_rc_ok;
 
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     GOptionContext *context = NULL;
     GOptionGroup *output_group = NULL;
     gchar **processed_args = NULL;
 
     context = build_arg_context(args, &output_group);
     pcmk__register_formats(output_group, formats);
     crm_log_cli_init("crm_resource");
 
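    /* Pre-process the command line so that single-letter options taking an
     * argument (the letters listed below) are in a form GOption can parse.
     */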
     processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv");
 
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     for (int i = 0; i < args->verbosity; i++) {
         crm_bump_log_level(argc, argv);
     }
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         fprintf(stderr, "Error creating output format %s: %s\n",
                 args->output_ty, pcmk_rc_str(rc));
         exit_code = CRM_EX_ERROR;
         goto done;
     }
 
     options.resource_verbose = args->verbosity;
     out->quiet = args->quiet;
 
     crm_log_args(argc, argv);
 
     if (options.host_uname) {
         crm_trace("Option host => %s", options.host_uname);
     }
 
     // If the user didn't explicitly specify a command, list resources
     if (options.rsc_cmd == cmd_none) {
         options.rsc_cmd = cmd_list_resources;
         options.require_resource = FALSE;
     }
 
     // --expired without --clear/-U doesn't make sense
     if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--expired requires --clear or -U");
         goto done;
     }
 
     if ((options.remainder != NULL) && (options.override_params != NULL)) {
         // Commands that use positional arguments will create override_params
         for (gchar **s = options.remainder; *s; s++) {
            char *name = calloc(1, strlen(*s) + 1);
            char *value = calloc(1, strlen(*s) + 1);
             int rc = sscanf(*s, "%[^=]=%s", name, value);
 
             if (rc == 2) {
                 g_hash_table_replace(options.override_params, name, value);
 
             } else {
                 g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                             "Error parsing '%s' as a name=value pair",
                             argv[optind]);
                 free(value);
                 free(name);
                 goto done;
             }
         }
 
     } else if (options.remainder != NULL) {
         gchar **strv = NULL;
         gchar *msg = NULL;
         int i = 1;
         int len = 0;
 
         for (gchar **s = options.remainder; *s; s++) {
             len++;
         }
 
         CRM_ASSERT(len > 0);
 
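        /* Room for the header at strv[0], the len argument strings, and a
         * terminating NULL so g_strjoinv() knows where to stop.
         */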
        strv = calloc(len + 2, sizeof(char *));
        strv[0] = strdup("non-option ARGV-elements:\n");
 
         for (gchar **s = options.remainder; *s; s++) {
             strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
             i++;
         }
 
         msg = g_strjoinv("", strv);
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s", msg);
 
         for(i = 0; i < len; i++) {
             free(strv[i]);
         }
 
         g_free(msg);
        free(strv);
         goto done;
     }
 
     if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
        /* A bit of a hack: display XML lists using a real tag instead of
         * <list>. This saves us from having to write custom messages just to
         * build the lists around all these commands.
         */
         if (options.rsc_cmd == cmd_list_resources || options.rsc_cmd == cmd_query_xml ||
             options.rsc_cmd == cmd_query_raw_xml || options.rsc_cmd == cmd_list_active_ops ||
             options.rsc_cmd == cmd_list_all_ops) {
             pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
         } else {
             pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
         }
     } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
         if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep ||
             options.rsc_cmd == cmd_list_resources) {
             pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
         }
     }
 
     pe__register_messages(out);
     crm_resource_register_messages(out);
     lrmd__register_messages(out);
     pcmk__register_lib_messages(out);
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if (optind > argc) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Invalid option(s) supplied, use --help for valid usage");
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
    // Sanity-check the command-line validation parameters. If everything
    // checks out, go ahead and run the validation; this way, no CIB
    // connection is needed.
     if (options.validate_cmdline) {
         validate_cmdline(&exit_code);
         goto done;
     } else if (options.validate_options != NULL) {
         // @COMPAT @TODO error out here when we can break backward compatibility
         g_hash_table_destroy(options.validate_options);
         options.validate_options = NULL;
     }
 
     if (error != NULL) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     if (options.force) {
         crm_debug("Forcing...");
         cib__set_call_options(options.cib_options, crm_system_name,
                               cib_quorum_override);
     }
 
     if (options.require_resource && !options.rsc_id) {
         rc = ENXIO;
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     "Must supply a resource id with -r");
         goto done;
     }
 
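    // Commands that look up a resource by name need a populated working set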
     if (options.find_flags && options.rsc_id) {
         options.require_dataset = TRUE;
     }
 
     // Establish a connection to the CIB if needed
     if (options.require_cib) {
         cib_conn = cib_new();
         if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
             rc = pcmk_rc_error;
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT,
                         "Could not create CIB connection");
             goto done;
         }
         rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Could not connect to the CIB: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /* Populate working set from XML file if specified or CIB query otherwise */
     if (options.require_dataset) {
         rc = populate_working_set(&cib_xml_copy);
         if (rc != pcmk_rc_ok) {
             goto done;
         }
     }
 
     // If command requires that resource exist if specified, find it
     if (options.find_flags && options.rsc_id) {
         rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
                                           options.find_flags);
         if (rsc == NULL) {
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Resource '%s' not found", options.rsc_id);
             goto done;
         }
     }
 
     // Establish a connection to the controller if needed
     if (options.require_crmd) {
         rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
         pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                    NULL);
         rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error connecting to the controller: %s", pcmk_rc_str(rc));
             goto done;
         }
     }
 
     switch (options.rsc_cmd) {
         case cmd_list_resources: {
             GListPtr all = NULL;
             all = g_list_prepend(all, strdup("*"));
             rc = out->message(out, "resource-list", data_set,
                               pe_print_rsconly | pe_print_pending,
                               FALSE, TRUE, FALSE, TRUE, all, all, FALSE);
             g_list_free_full(all, free);
 
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
             break;
         }
 
         case cmd_list_instances:
             rc = out->message(out, "resource-names-list", data_set->resources);
 
             if (rc != pcmk_rc_ok) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_list_standards:
         case cmd_list_providers:
         case cmd_list_alternatives:
             rc = list_providers(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_list_agents:
             rc = list_agents(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_metadata:
             rc = show_metadata(out, options.agent_spec, &exit_code);
             break;
 
         case cmd_restart:
             /* We don't pass data_set because rsc needs to stay valid for the
              * entire lifetime of cli_resource_restart(), but it will reset and
              * update the working set multiple times, so it needs to use its own
              * copy.
              */
             rc = cli_resource_restart(out, rsc, options.host_uname,
                                       options.move_lifetime, options.timeout_ms,
                                       cib_conn, options.cib_options,
                                       options.promoted_role_only,
                                       options.force);
             break;
 
         case cmd_wait:
             rc = wait_till_stable(out, options.timeout_ms, cib_conn);
             break;
 
         case cmd_execute_agent:
             exit_code = cli_resource_execute(out, rsc, options.rsc_id,
                                              options.operation,
                                              options.override_params,
                                              options.timeout_ms, cib_conn,
                                              data_set, options.resource_verbose,
                                              options.force);
             break;
 
         case cmd_colocations:
             rc = out->message(out, "stacks-constraints", rsc, data_set, false);
             break;
 
         case cmd_colocations_deep:
             rc = out->message(out, "stacks-constraints", rsc, data_set, true);
             break;
 
         case cmd_cts:
             rc = pcmk_rc_ok;
 
             for (GList *lpc = data_set->resources; lpc != NULL;
                  lpc = lpc->next) {
 
                 rsc = (pe_resource_t *) lpc->data;
                 cli_resource_print_cts(out, rsc);
             }
 
             cli_resource_print_cts_constraints(out, data_set);
             break;
 
         case cmd_fail:
             rc = cli_resource_fail(out, controld_api, options.host_uname,
                                    options.rsc_id, data_set);
             if (rc == pcmk_rc_ok) {
                 start_mainloop(controld_api);
             }
             break;
 
         case cmd_list_active_ops:
             rc = cli_resource_print_operations(out, options.rsc_id,
                                                options.host_uname, TRUE,
                                                data_set);
             break;
 
         case cmd_list_all_ops:
             rc = cli_resource_print_operations(out, options.rsc_id,
                                                options.host_uname, FALSE,
                                                data_set);
             break;
 
         case cmd_locate: {
             GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set);
-            rc = out->message(out, "resource-search", resources, rsc, options.rsc_id);
+            rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id);
             break;
         }
 
         case cmd_query_xml:
             rc = cli_resource_print(out, rsc, data_set, TRUE);
             break;
 
         case cmd_query_raw_xml:
             rc = cli_resource_print(out, rsc, data_set, FALSE);
             break;
 
         case cmd_why:
             {
                 pe_node_t *dest = NULL;
 
                 if (options.host_uname) {
                     dest = pe_find_node(data_set->nodes, options.host_uname);
                     if (dest == NULL) {
                         rc = pcmk_rc_node_unknown;
                         goto done;
                     }
                 }
-                out->message(out, "resource-why", cib_conn, data_set->resources, rsc, dest);
+                out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, dest);
                 rc = pcmk_rc_ok;
             }
             break;
 
         case cmd_clear:
             rc = clear_constraints(out, &cib_xml_copy);
             break;
 
         case cmd_move:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
             } else {
                 rc = cli_resource_move(out, rsc, options.rsc_id, options.host_uname,
                                        options.move_lifetime, cib_conn,
                                        options.cib_options, data_set,
                                        options.promoted_role_only,
                                        options.force);
             }
             break;
 
         case cmd_ban:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
             } else {
                 pe_node_t *dest = pe_find_node(data_set->nodes,
                                                options.host_uname);
 
                 if (dest == NULL) {
                     rc = pcmk_rc_node_unknown;
                     goto done;
                 }
                 rc = cli_resource_ban(out, options.rsc_id, dest->details->uname,
                                       options.move_lifetime, NULL, cib_conn,
                                       options.cib_options,
                                       options.promoted_role_only);
             }
             break;
 
         case cmd_get_property:
-            rc = out->message(out, "property", rsc, options.prop_name);
+            rc = out->message(out, "property-list", rsc, options.prop_name);
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_set_property:
             rc = set_property();
             break;
 
         case cmd_get_param: {
             unsigned int count = 0;
             GHashTable *params = NULL;
             pe_node_t *current = pe__find_active_on(rsc, &count, NULL);
 
             if (count > 1) {
                 out->err(out, "%s is active on more than one node,"
                          " returning the default value for %s", rsc->id, crm_str(options.prop_name));
                 current = NULL;
             }
 
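            /* Collect the requested attribute set (instance, meta, or
             * utilization) as evaluated on the node where the resource is
             * active, or the defaults if it is not active anywhere.
             */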
             params = crm_str_table_new();
 
             if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
                 get_rsc_attributes(params, rsc, current, data_set);
 
             } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
                 /* No need to redirect to the parent */
                 get_meta_attributes(params, rsc, current, data_set);
 
             } else {
                 pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
                                            NULL, FALSE, data_set);
             }
 
             crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
-            rc = out->message(out, "attribute", rsc, options.prop_name, params);
+            rc = out->message(out, "attribute-list", rsc, options.prop_name, params);
             g_hash_table_destroy(params);
             break;
         }
 
         case cmd_set_param:
             if (pcmk__str_empty(options.prop_value)) {
                 g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                             "You need to supply a value with the -v option");
                 rc = EINVAL;
                 goto done;
             }
 
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_update_attribute(out, rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name,
                                                options.prop_value,
                                                options.recursive, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_delete_param:
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_delete_attribute(out, rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name, cib_conn,
                                                options.cib_options, data_set,
                                                options.force);
             break;
 
         case cmd_cleanup:
             if (rsc == NULL) {
                 rc = cli_cleanup_all(out, controld_api, options.host_uname,
                                      options.operation, options.interval_spec,
                                      data_set);
                 if (rc == pcmk_rc_ok) {
                     start_mainloop(controld_api);
                 }
             } else {
                 cleanup(out, rsc);
             }
             break;
 
         case cmd_refresh:
             if (rsc == NULL) {
                 rc = refresh(out);
             } else {
                 refresh_resource(out, rsc);
             }
             break;
 
         case cmd_delete:
             rc = delete();
             break;
 
         default:
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_SOFTWARE,
                         "Unimplemented command: %d", (int) options.rsc_cmd);
             break;
     }
 
 done:
     if (rc != pcmk_rc_ok) {
         if (rc == pcmk_rc_no_quorum) {
             g_prefix_error(&error, "To ignore quorum, use the force option.\n");
         }
 
         if (error != NULL) {
             char *msg = crm_strdup_printf("%s\nError performing operation: %s",
                                           error->message, pcmk_rc_str(rc));
             g_clear_error(&error);
             g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg);
             free(msg);
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Error performing operation: %s", pcmk_rc_str(rc));
         }
 
         if (exit_code == CRM_EX_OK) {
             exit_code = pcmk_rc2exitc(rc);
         }
     }
 
     g_free(options.host_uname);
     g_free(options.interval_spec);
     g_free(options.move_lifetime);
     g_free(options.operation);
     g_free(options.prop_id);
     free(options.prop_name);
     g_free(options.prop_set);
     g_free(options.prop_value);
     g_free(options.rsc_id);
     g_free(options.rsc_type);
     free(options.agent_spec);
     free(options.v_agent);
     free(options.v_class);
     free(options.v_provider);
     g_free(options.xml_file);
     g_strfreev(options.remainder);
 
     if (options.override_params != NULL) {
         g_hash_table_destroy(options.override_params);
     }
 
     /* options.validate_options does not need to be destroyed here.  See the
      * comments in cli_resource_execute_from_params.
      */
 
     g_strfreev(processed_args);
     g_option_context_free(context);
 
     return bye(exit_code);
 }
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index cb0687903f..89d6172dd3 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -1,575 +1,574 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/output_internal.h>
 
 #define cons_string(x) ((x)? (x) : "NA")
 void
 cli_resource_print_cts_constraints(pcmk__output_t *out, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
     xmlNode *lifetime = NULL;
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     for (xml_obj = pcmk__xe_first_child(cib_constraints); xml_obj != NULL;
          xml_obj = pcmk__xe_next(xml_obj)) {
         const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
 
         if (id == NULL) {
             continue;
         }
 
         // @COMPAT lifetime is deprecated
         lifetime = first_named_child(xml_obj, "lifetime");
         if (pe_evaluate_rules(lifetime, NULL, data_set->now, NULL) == FALSE) {
             continue;
         }
 
         if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) {
             continue;
         }
 
         out->info(out, "Constraint %s %s %s %s %s %s %s",
                   crm_element_name(xml_obj),
                   cons_string(crm_element_value(xml_obj, XML_ATTR_ID)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)),
                   cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)),
                   cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE)));
     }
 }
 
 void
 cli_resource_print_cts(pcmk__output_t *out, pe_resource_t * rsc)
 {
     GListPtr lpc = NULL;
     const char *host = NULL;
     bool needs_quorum = TRUE;
     const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     pe_node_t *node = pe__current_node(rsc);
 
     if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         needs_quorum = FALSE;
     } else {
         // @TODO check requires in resource meta-data and rsc_defaults
     }
 
     if (node != NULL) {
         host = node->details->uname;
     }
 
     out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx",
               crm_element_name(rsc->xml), rsc->id,
               rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA",
               rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags,
               rsc->flags);
 
     for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *child = (pe_resource_t *) lpc->data;
 
         cli_resource_print_cts(out, child);
     }
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_print_operations(pcmk__output_t *out, const char *rsc_id,
                               const char *host_uname, bool active,
                               pe_working_set_t * data_set)
 {
     int rc = pcmk_rc_no_output;
     GListPtr ops = find_operations(rsc_id, host_uname, active, data_set);
 
     if (!ops) {
         return rc;
     }
 
     out->begin_list(out, NULL, NULL, "Resource Operations");
     rc = pcmk_rc_ok;
 
     for (GListPtr lpc = ops; lpc != NULL; lpc = lpc->next) {
         xmlNode *xml_op = (xmlNode *) lpc->data;
         out->message(out, "node-and-op", data_set, xml_op);
     }
 
     out->end_list(out);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_print(pcmk__output_t *out, pe_resource_t *rsc,
                    pe_working_set_t *data_set, bool expanded)
 {
     unsigned int opts = pe_print_pending;
     GListPtr all = NULL;
 
     all = g_list_prepend(all, strdup("*"));
 
     out->begin_list(out, NULL, NULL, "Resource Config");
     out->message(out, crm_map_element_name(rsc->xml), opts, rsc, all, all);
     out->message(out, "resource-config", rsc, !expanded);
     out->end_list(out);
 
     g_list_free_full(all, free);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *")
+PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *")
 static int
-attribute_default(pcmk__output_t *out, va_list args) {
+attribute_list_default(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
     GHashTable *params = va_arg(args, GHashTable *);
 
     const char *value = g_hash_table_lookup(params, attr);
 
     if (value != NULL) {
         out->begin_list(out, NULL, NULL, "Attributes");
         out->list_item(out, attr, "%s", value);
         out->end_list(out);
     } else {
         out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("attribute", "pe_resource_t *", "char *", "GHashTable *")
+PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "char *", "GHashTable *")
 static int
-attribute_text(pcmk__output_t *out, va_list args) {
+attribute_list_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
     GHashTable *params = va_arg(args, GHashTable *);
 
     const char *value = g_hash_table_lookup(params, attr);
 
     if (value != NULL) {
         out->info(out, "%s", value);
     } else {
         out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *")
+PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *")
 static int
-property_default(pcmk__output_t *out, va_list args) {
+property_list_default(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
 
     const char *value = crm_element_value(rsc->xml, attr);
 
     if (value != NULL) {
         out->begin_list(out, NULL, NULL, "Properties");
         out->list_item(out, attr, "%s", value);
         out->end_list(out);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("property", "pe_resource_t *", "char *")
+PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "char *")
 static int
-property_text(pcmk__output_t *out, va_list args) {
+property_list_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     char *attr = va_arg(args, char *);
 
     const char *value = crm_element_value(rsc->xml, attr);
 
     if (value != NULL) {
         out->info(out, "%s", value);
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *")
+PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
 static int
-resource_check_default(pcmk__output_t *out, va_list args) {
+resource_check_list_default(pcmk__output_t *out, va_list args) {
     resource_checks_t *checks = va_arg(args, resource_checks_t *);
 
     pe_resource_t *parent = uber_parent(checks->rsc);
     int rc = pcmk_rc_no_output;
     bool printed = false;
 
     if (checks->flags != 0 || checks->lock_node != NULL) {
         printed = true;
         out->begin_list(out, NULL, NULL, "Resource Checks");
     }
 
     if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
         out->list_item(out, "check", "Configuration specifies '%s' should remain stopped",
                        parent->id);
     }
 
     if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
         out->list_item(out, "check", "Configuration specifies '%s' should not be promoted",
                        parent->id);
     }
 
     if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
         out->list_item(out, "check", "Configuration prevents cluster from stopping or starting unmanaged '%s'",
                        parent->id);
     }
 
     if (checks->lock_node) {
         out->list_item(out, "check", "'%s' is locked to node %s due to shutdown",
                        parent->id, checks->lock_node);
     }
 
     if (printed) {
         out->end_list(out);
         rc = pcmk_rc_ok;
     }
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-check", "resource_checks_t *")
+PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
 static int
-resource_check_xml(pcmk__output_t *out, va_list args) {
+resource_check_list_xml(pcmk__output_t *out, va_list args) {
     resource_checks_t *checks = va_arg(args, resource_checks_t *);
 
     pe_resource_t *parent = uber_parent(checks->rsc);
     int rc = pcmk_rc_no_output;
 
-    xmlNode *node = pcmk__output_create_xml_node(out, "check",
-                                                 "id", parent->id,
-                                                 NULL);
+    xmlNodePtr node = pcmk__output_create_xml_node(out, "check",
+                                                   "id", parent->id,
+                                                   NULL);
 
     if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
         crm_xml_add(node, "remain_stopped", "true");
     }
 
     if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
         crm_xml_add(node, "promotable", "false");
     }
 
     if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
         crm_xml_add(node, "unmanaged", "true");
     }
 
     if (checks->lock_node) {
         crm_xml_add(node, "locked-to", checks->lock_node);
     }
 
     return rc;
 }
 
-PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
 static int
-resource_search_default(pcmk__output_t *out, va_list args)
+resource_search_list_default(pcmk__output_t *out, va_list args)
 {
-    GListPtr nodes = va_arg(args, GListPtr);
+    GList *nodes = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     bool printed = false;
     int rc = pcmk_rc_no_output;
 
     if (!out->is_quiet(out) && nodes == NULL) {
         out->err(out, "resource %s is NOT running", requested_name);
         return rc;
     }
 
-    for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) {
+    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
         pe_node_t *node = (pe_node_t *) lpc->data;
 
         if (!printed) {
             out->begin_list(out, NULL, NULL, "Nodes");
             printed = true;
             rc = pcmk_rc_ok;
         }
 
         if (out->is_quiet(out)) {
             out->list_item(out, "node", "%s", node->details->uname);
         } else {
             const char *state = "";
 
             if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
                 state = " Master";
             }
             out->list_item(out, "node", "resource %s is running on: %s%s",
                            requested_name, node->details->uname, state);
         }
     }
 
     if (printed) {
         out->end_list(out);
     }
 
     return rc;
 }
 
-
-PCMK__OUTPUT_ARGS("resource-search", "GListPtr", "pe_resource_t *", "gchar *")
+PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "pe_resource_t *", "gchar *")
 static int
-resource_search_xml(pcmk__output_t *out, va_list args)
+resource_search_list_xml(pcmk__output_t *out, va_list args)
 {
-    GListPtr nodes = va_arg(args, GListPtr);
+    GList *nodes = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     gchar *requested_name = va_arg(args, gchar *);
 
     pcmk__output_xml_create_parent(out, "nodes",
                                    "resource", requested_name,
                                    NULL);
 
-    for (GListPtr lpc = nodes; lpc != NULL; lpc = lpc->next) {
+    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
         pe_node_t *node = (pe_node_t *) lpc->data;
-        xmlNode *sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname);
+        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out, "node", node->details->uname);
 
         if (!pe_rsc_is_clone(rsc) && rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
             crm_xml_add(sub_node, "state", "promoted");
         }
     }
 
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *",
+PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *",
                   "pe_node_t *")
 static int
-resource_why_default(pcmk__output_t *out, va_list args)
+resource_reasons_list_default(pcmk__output_t *out, va_list args)
 {
     cib_t *cib_conn = va_arg(args, cib_t *);
-    GListPtr resources = va_arg(args, GListPtr);
+    GList *resources = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
 
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
     out->begin_list(out, NULL, NULL, "Resource Reasons");
 
     if ((rsc == NULL) && (host_uname == NULL)) {
-        GListPtr lpc = NULL;
-        GListPtr hosts = NULL;
+        GList *lpc = NULL;
+        GList *hosts = NULL;
 
         for (lpc = resources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             rsc->fns->location(rsc, &hosts, TRUE);
 
             if (hosts == NULL) {
                 out->list_item(out, "reason", "Resource %s is not running", rsc->id);
             } else {
                 out->list_item(out, "reason", "Resource %s is running", rsc->id);
             }
 
             cli_resource_check(out, cib_conn, rsc);
             g_list_free(hosts);
             hosts = NULL;
         }
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         if (resource_is_running_on(rsc, host_uname)) {
             out->list_item(out, "reason", "Resource %s is running on host %s",
                            rsc->id, host_uname);
         } else {
             out->list_item(out, "reason", "Resource %s is not running on host %s",
                            rsc->id, host_uname);
         }
 
         cli_resource_check(out, cib_conn, rsc);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         const char *host_uname = node->details->uname;
-        GListPtr allResources = node->details->allocated_rsc;
-        GListPtr activeResources = node->details->running_rsc;
-        GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
-        GListPtr lpc = NULL;
+        GList *allResources = node->details->allocated_rsc;
+        GList *activeResources = node->details->running_rsc;
+        GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
+        GList *lpc = NULL;
 
         for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             out->list_item(out, "reason", "Resource %s is running on host %s",
                            rsc->id, host_uname);
             cli_resource_check(out, cib_conn, rsc);
         }
 
         for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
             out->list_item(out, "reason", "Resource %s is assigned to host %s but not running",
                            rsc->id, host_uname);
             cli_resource_check(out, cib_conn, rsc);
         }
 
         g_list_free(allResources);
         g_list_free(activeResources);
         g_list_free(unactiveResources);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
-        GListPtr hosts = NULL;
+        GList *hosts = NULL;
 
         rsc->fns->location(rsc, &hosts, TRUE);
         out->list_item(out, "reason", "Resource %s is %srunning",
                        rsc->id, (hosts? "" : "not "));
         cli_resource_check(out, cib_conn, rsc);
         g_list_free(hosts);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
-PCMK__OUTPUT_ARGS("resource-why", "cib_t *", "GListPtr", "pe_resource_t *",
+PCMK__OUTPUT_ARGS("resource-reasons-list", "cib_t *", "GList *", "pe_resource_t *",
                   "pe_node_t *")
 static int
-resource_why_xml(pcmk__output_t *out, va_list args)
+resource_reasons_list_xml(pcmk__output_t *out, va_list args)
 {
     cib_t *cib_conn = va_arg(args, cib_t *);
-    GListPtr resources = va_arg(args, GListPtr);
+    GList *resources = va_arg(args, GList *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
 
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
-    xmlNode *xml_node = pcmk__output_xml_create_parent(out, "reason", NULL);
+    xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, "reason", NULL);
 
     if ((rsc == NULL) && (host_uname == NULL)) {
-        GListPtr lpc = NULL;
-        GListPtr hosts = NULL;
+        GList *lpc = NULL;
+        GList *hosts = NULL;
 
         pcmk__output_xml_create_parent(out, "resources", NULL);
 
         for (lpc = resources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             rsc->fns->location(rsc, &hosts, TRUE);
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", pcmk__btoa(hosts != NULL),
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
             g_list_free(hosts);
             hosts = NULL;
         }
 
         pcmk__output_xml_pop_parent(out);
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         if (resource_is_running_on(rsc, host_uname)) {
             crm_xml_add(xml_node, "running_on", host_uname);
         }
 
         cli_resource_check(out, cib_conn, rsc);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         const char* host_uname =  node->details->uname;
-        GListPtr allResources = node->details->allocated_rsc;
-        GListPtr activeResources = node->details->running_rsc;
-        GListPtr unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
-        GListPtr lpc = NULL;
+        GList *allResources = node->details->allocated_rsc;
+        GList *activeResources = node->details->running_rsc;
+        GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
+        GList *lpc = NULL;
 
         pcmk__output_xml_create_parent(out, "resources", NULL);
 
         for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", "true",
                                            "host", host_uname,
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
         }
 
         for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
             pcmk__output_xml_create_parent(out, "resource",
                                            "id", rsc->id,
                                            "running", "false",
                                            "host", host_uname,
                                            NULL);
 
             cli_resource_check(out, cib_conn, rsc);
             pcmk__output_xml_pop_parent(out);
         }
 
         pcmk__output_xml_pop_parent(out);
         g_list_free(allResources);
         g_list_free(activeResources);
         g_list_free(unactiveResources);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
-        GListPtr hosts = NULL;
+        GList *hosts = NULL;
 
         rsc->fns->location(rsc, &hosts, TRUE);
         crm_xml_add(xml_node, "running", pcmk__btoa(hosts != NULL));
         cli_resource_check(out, cib_conn, rsc);
         g_list_free(hosts);
     }
 
     return pcmk_rc_ok;
 }
 
 static void
 add_resource_name(pcmk__output_t *out, pe_resource_t *rsc) {
     if (rsc->children == NULL) {
         out->list_item(out, "resource", "%s", rsc->id);
     } else {
         for (GListPtr lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *child = (pe_resource_t *) lpc->data;
             add_resource_name(out, child);
         }
     }
 }
 
-PCMK__OUTPUT_ARGS("resource-names-list", "GListPtr")
+PCMK__OUTPUT_ARGS("resource-names-list", "GList *")
 static int
 resource_names(pcmk__output_t *out, va_list args) {
-    GListPtr resources = va_arg(args, GListPtr);
+    GList *resources = va_arg(args, GList *);
 
     if (resources == NULL) {
         out->err(out, "NO resources configured\n");
         return pcmk_rc_no_output;
     }
 
     out->begin_list(out, NULL, NULL, "Resource Names");
 
-    for (GListPtr lpc = resources; lpc != NULL; lpc = lpc->next) {
+    for (GList *lpc = resources; lpc != NULL; lpc = lpc->next) {
         pe_resource_t *rsc = (pe_resource_t *) lpc->data;
         add_resource_name(out, rsc);
     }
 
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
-    { "attribute", "default", attribute_default },
-    { "attribute", "text", attribute_text },
-    { "property", "default", property_default },
-    { "property", "text", property_text },
-    { "resource-check", "default", resource_check_default },
-    { "resource-check", "xml", resource_check_xml },
-    { "resource-search", "default", resource_search_default },
-    { "resource-search", "xml", resource_search_xml },
-    { "resource-why", "default", resource_why_default },
-    { "resource-why", "xml", resource_why_xml },
+    { "attribute-list", "default", attribute_list_default },
+    { "attribute-list", "text", attribute_list_text },
+    { "property-list", "default", property_list_default },
+    { "property-list", "text", property_list_text },
+    { "resource-check-list", "default", resource_check_list_default },
+    { "resource-check-list", "xml", resource_check_list_xml },
+    { "resource-search-list", "default", resource_search_list_default },
+    { "resource-search-list", "xml", resource_search_list_xml },
+    { "resource-reasons-list", "default", resource_reasons_list_default },
+    { "resource-reasons-list", "xml", resource_reasons_list_xml },
     { "resource-names-list", "default", resource_names },
 
     { NULL, NULL, NULL }
 };
 
 void
 crm_resource_register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index bbd8bc1e53..3a9feac600 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,1959 +1,1959 @@
 /*
  * Copyright 2004-2020 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/xml_internal.h>
 
 resource_checks_t *
 cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed)
 {
     pe_resource_t *parent = uber_parent(rsc);
     resource_checks_t *rc = calloc(1, sizeof(resource_checks_t));
 
     if (role_s) {
         enum rsc_role_e role = text2role(role_s);
 
         if (role == RSC_ROLE_STOPPED) {
             rc->flags |= rsc_remain_stopped;
         } else if (pcmk_is_set(parent->flags, pe_rsc_promotable) &&
                    role == RSC_ROLE_SLAVE) {
             rc->flags |= rsc_unpromotable;
         }
     }
 
     if (managed && !crm_is_true(managed)) {
         rc->flags |= rsc_unmanaged;
     }
 
     if (rsc->lock_node) {
         rc->lock_node = rsc->lock_node->details->uname;
     }
 
     rc->rsc = rsc;
     return rc;
 }
 
 GListPtr
 cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
     GListPtr found = NULL;
     pe_resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
         for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
             GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
             if (extra != NULL) {
                 found = g_list_concat(found, extra);
             }
         }
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
                && !pcmk_is_set(rsc->flags, pe_rsc_unique)
                && rsc->clone_name
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
         for (GListPtr iter = parent->children; iter; iter = iter->next) {
             GListPtr extra = ((pe_resource_t *) iter->data)->running_on;
             if (extra != NULL) {
                 found = g_list_concat(found, extra);
             }
         }
 
     } else if (rsc->running_on != NULL) {
         found = g_list_concat(found, rsc->running_on);
     }
 
     return found;
 }
 
 #define XPATH_MAX 1024
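 /* find_resource_attr() below builds an XPath query for a resource attribute.
  * As an illustration (the exact prefix comes from get_object_path()), looking
  * up attr_name "target-role" in the meta_attributes of resource "myrsc"
  * produces roughly:
  *   //resources//*[@id="myrsc"]/meta_attributes//nvpair[@name="target-role"]
  */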
 
 // \return Standard Pacemaker return code
 static int
 find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
                    const char *rsc, const char *attr_set_type, const char *set_name,
                    const char *attr_id, const char *attr_name, char **value)
 {
     int offset = 0;
     int rc = pcmk_rc_ok;
     xmlNode *xml_search = NULL;
     char *xpath_string = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return ENOTCONN;
     }
 
     xpath_string = calloc(1, XPATH_MAX);
     offset +=
         snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
 
     if (attr_set_type) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", attr_set_type);
         if (set_name) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
         }
     }
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
     if (attr_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
     }
 
     if (attr_name) {
         if (attr_id) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
         }
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
     }
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
     CRM_LOG_ASSERT(offset > 0);
 
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_has_children(xml_search)) {
         xmlNode *child = NULL;
 
         rc = EINVAL;
         out->info(out, "Multiple attributes match name=%s", attr_name);
 
         for (child = pcmk__xml_first_child(xml_search); child != NULL;
              child = pcmk__xml_next(child)) {
             out->info(out, "  Value: %s \t(id=%s)",
                       crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
         out->spacer(out);
 
     } else if(value) {
         const char *tmp = crm_element_value(xml_search, attr);
 
         if (tmp) {
             *value = strdup(tmp);
         }
     }
 
   bail:
     free(xpath_string);
     free_xml(xml_search);
     return rc;
 }
 
 /* Private helper: use find_matching_attr_resources() instead. */
 static void
 find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result,
                                        pe_resource_t * rsc, const char * rsc_id,
                                        const char * attr_set, const char * attr_set_type,
                                        const char * attr_id, const char * attr_name,
                                        cib_t * cib, const char * cmd, int depth)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = clone_strip(rsc->id);
     char *local_attr_id = NULL;
 
     /* visit the children */
     for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
         find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
                                                rsc_id, attr_set, attr_set_type,
                                                attr_id, attr_name, cib, cmd, depth+1);
         /* do it only once for clones */
         if(pe_clone == rsc->variant) {
             break;
         }
     }
 
     rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                             attr_set, attr_id, attr_name, &local_attr_id);
     /* Post-order traversal: the root is always on the list, and it is
      * the last item. */
     if((0 == depth) || (pcmk_rc_ok == rc)) {
         /* push the head */
         *result = g_list_append(*result, rsc);
     }
 
     free(local_attr_id);
     free(lookup_id);
 }
 
 
 /* The result is a post-order linearization of the matching resources:
  * the requested resource is always included, as the last item. */
 static GList/*<pe_resource_t*>*/ *
 find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
                              const char * rsc_id, const char * attr_set,
                              const char * attr_set_type, const char * attr_id,
                              const char * attr_name, cib_t * cib, const char * cmd,
                              gboolean force)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     GList * result = NULL;
     /* If --force is used, update only the requested resource (clone or primitive).
      * Otherwise, if the primitive has the attribute, use that.
      * Otherwise use the clone. */
     if(force == TRUE) {
         return g_list_append(result, rsc);
     }
     if(rsc->parent && pe_clone == rsc->parent->variant) {
         int rc = pcmk_rc_ok;
         char *local_attr_id = NULL;
         rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
         free(local_attr_id);
 
         if(rc != pcmk_rc_ok) {
             rsc = rsc->parent;
             if (!out->is_quiet(out)) {
                 out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
                           cmd, attr_name, rsc->id, rsc_id);
             }
         }
         return g_list_append(result, rsc);
     } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
         pe_resource_t *child = rsc->children->data;
 
         if(child->variant == pe_native) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                     attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_rc_ok) {
                 rsc = child;
                 if (!out->is_quiet(out)) {
                     out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
                               attr_name, lookup_id, cmd, rsc_id);
                 }
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
         return g_list_append(result, rsc);
     }
     /* If the resource is a group, its children inherit the attribute if defined. */
     find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
                                            attr_set_type, attr_id, attr_name,
                                            cib, cmd, 0);
     return result;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc,
                               const char *requested_name, const char *attr_set,
                               const char *attr_set_type, const char *attr_id,
                               const char *attr_name, const char *attr_value,
                               gboolean recursive, cib_t *cib, int cib_options,
                               pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     static bool need_init = TRUE;
 
     char *local_attr_id = NULL;
     char *local_attr_set = NULL;
 
     GList/*<pe_resource_t*>*/ *resources = NULL;
     const char *common_attr_id = attr_id;
 
     if (attr_id == NULL && force == FALSE) {
         find_resource_attr (out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
                             NULL, NULL, attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
         if (force == FALSE) {
             rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &local_attr_id);
             if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
                 out->err(out, "WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)",
                          uber_parent(rsc)->id, attr_name, local_attr_id);
                 out->err(out, "         Delete '%s' first or use the force option to override",
                          local_attr_id);
             }
             free(local_attr_id);
             if (rc == pcmk_rc_ok) {
                 return ENOTUNIQ;
             }
         }
         resources = g_list_append(resources, rsc);
 
     } else {
         resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
                                                  attr_id, attr_name, cib, "update", force);
     }
 
     /* If either attr_set or attr_id is specified,
      * one clearly intends to modify a single resource.
      * It is the last item on the resource list.*/
     for(GList *gIter = (attr_set||attr_id) ? g_list_last(resources) : resources
             ; gIter; gIter = gIter->next) {
         char *lookup_id = NULL;
 
         xmlNode *xml_top = NULL;
         xmlNode *xml_obj = NULL;
         local_attr_id = NULL;
         local_attr_set = NULL;
 
         rsc = (pe_resource_t*)gIter->data;
         attr_id = common_attr_id;
 
         lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
 
         if (rc == pcmk_rc_ok) {
             crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
             attr_id = local_attr_id;
 
         } else if (rc != ENXIO) {
             free(lookup_id);
             free(local_attr_id);
             g_list_free(resources);
             return rc;
 
         } else {
             const char *tag = crm_element_name(rsc->xml);
 
             if (attr_set == NULL) {
                 local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
                                                    attr_set_type);
                 attr_set = local_attr_set;
             }
             if (attr_id == NULL) {
                 local_attr_id = crm_strdup_printf("%s-%s", attr_set, attr_name);
                 attr_id = local_attr_id;
             }
 
             xml_top = create_xml_node(NULL, tag);
             crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
             xml_obj = create_xml_node(xml_top, attr_set_type);
             crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
         }
 
         xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
         if (xml_top == NULL) {
             xml_top = xml_obj;
         }
 
         crm_log_xml_debug(xml_top, "Update");
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
             out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, local_attr_id,
                       attr_set ? " set=" : "", attr_set ? attr_set : "",
                       attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
         }
 
         free_xml(xml_top);
 
         free(lookup_id);
         free(local_attr_id);
         free(local_attr_set);
 
         if(recursive && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
             GListPtr lpc = NULL;
 
             if(need_init) {
                 xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
                 need_init = FALSE;
                 unpack_constraints(cib_constraints, data_set);
 
                 for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
                     pe_resource_t *r = (pe_resource_t *) lpc->data;
 
                     pe__clear_resource_flags(r, pe_rsc_allocating);
                 }
             }
 
             crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
             pe__set_resource_flags(rsc, pe_rsc_allocating);
             for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
                 rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
                 pe_resource_t *peer = cons->rsc_lh;
 
                 crm_debug("Checking %s %d", cons->id, cons->score);
                 if (cons->score > 0 && !pcmk_is_set(peer->flags, pe_rsc_allocating)) {
                     /* Don't get into colocation loops */
                     crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                     cli_resource_update_attribute(out, peer, peer->id, NULL, attr_set_type,
                                                   NULL, attr_name, attr_value, recursive,
                                                   cib, cib_options, data_set, force);
                 }
             }
         }
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc,
                               const char *requested_name, const char *attr_set,
                               const char *attr_set_type, const char *attr_id,
                               const char *attr_name, cib_t *cib, int cib_options,
                               pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     GList/*<pe_resource_t*>*/ *resources = NULL;
 
     if (attr_id == NULL && force == FALSE) {
         find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
                            NULL, NULL, attr_name, NULL);
     }
 
     if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
         resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
                                                  attr_id, attr_name, cib, "delete", force);
     } else {
         resources = g_list_append(resources, rsc);
     }
 
     for(GList *gIter = resources; gIter; gIter = gIter->next) {
         char *lookup_id = NULL;
         xmlNode *xml_obj = NULL;
         char *local_attr_id = NULL;
 
         rsc = (pe_resource_t*)gIter->data;
 
         lookup_id = clone_strip(rsc->id);
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
 
         if (rc == ENXIO) {
             free(lookup_id);
             rc = pcmk_rc_ok;
             continue;
 
         } else if (rc != pcmk_rc_ok) {
             free(lookup_id);
             g_list_free(resources);
             return rc;
         }
 
         if (attr_id == NULL) {
             attr_id = local_attr_id;
         }
 
         xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
         crm_log_xml_debug(xml_obj, "Delete");
 
         CRM_ASSERT(cib);
         rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
             out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, local_attr_id,
                       attr_set ? " set=" : "", attr_set ? attr_set : "",
                       attr_name ? " name=" : "", attr_name ? attr_name : "");
         }
 
         free(lookup_id);
         free_xml(xml_obj);
         free(local_attr_id);
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
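 // Ask the controller either to fail a resource (do_fail_resource == true) or
 // to clear its operation history so that it will be re-probed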
 static int
 send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_resource,
                 const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
 {
     const char *router_node = host_uname;
     const char *rsc_api_id = NULL;
     const char *rsc_long_id = NULL;
     const char *rsc_class = NULL;
     const char *rsc_provider = NULL;
     const char *rsc_type = NULL;
     bool cib_only = false;
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         out->err(out, "Resource %s not found", rsc_id);
         return ENXIO;
 
     } else if (rsc->variant != pe_native) {
         out->err(out, "We can only process primitive resources, not %s", rsc_id);
         return EINVAL;
     }
 
     rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     if ((rsc_class == NULL) || (rsc_type == NULL)) {
         out->err(out, "Resource %s does not have a class and type", rsc_id);
         return EINVAL;
     }
 
     if (host_uname == NULL) {
         out->err(out, "Please specify a node name");
         return EINVAL;
 
     } else {
         pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node == NULL) {
             out->err(out, "Node %s not found", host_uname);
             return pcmk_rc_node_unknown;
         }
 
         if (!(node->details->online)) {
             if (do_fail_resource) {
                 out->err(out, "Node %s is not online", host_uname);
                 return ENOTCONN;
             } else {
                 cib_only = true;
             }
         }
         if (!cib_only && pe__is_guest_or_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
                          host_uname);
                 return ENOTCONN;
             }
             router_node = node->details->uname;
         }
     }
 
     if (rsc->clone_name) {
         rsc_api_id = rsc->clone_name;
         rsc_long_id = rsc->id;
     } else {
         rsc_api_id = rsc->id;
     }
     if (do_fail_resource) {
         return pcmk_controld_api_fail(controld_api, host_uname, router_node,
                                       rsc_api_id, rsc_long_id,
                                       rsc_class, rsc_provider, rsc_type);
     } else {
         return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
                                          rsc_api_id, rsc_long_id, rsc_class,
                                          rsc_provider, rsc_type, cib_only);
     }
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
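  * \note For example, an anonymous clone instance named "myclone:2" yields
  *       "myclone", while a unique instance's name is returned unchanged.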
  */
 static inline char *
 rsc_fail_name(pe_resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_history(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                   const char *host_uname, const char *rsc_id,
                   pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(out, controld_api, false, host_uname, rsc_id, data_set);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     crm_trace("Processing %d mainloop inputs",
               pcmk_controld_api_replies_expected(controld_api));
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   pcmk_controld_api_replies_expected(controld_api));
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                    const char *node_name, const char *rsc_id, const char *operation,
                    const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
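     // e.g. an interval_spec of "1m" is normalized to the string "60000"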
     if (operation) {
         interval_ms_s = crm_strdup_printf("%u",
                                           crm_parse_interval_spec(interval_spec));
     }
 
     for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
          xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                                   failed_id,
                                                                   pe_find_renamed|pe_find_anon);
 
             if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
         if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
             if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
                 continue;
             }
 
             // Interval (if operation was specified) defaults to 0 (not all)
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
             if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
                 continue;
             }
         }
 
         /* not available until glib 2.32
         g_hash_table_add(rscs, (gpointer) failed_id);
         */
         g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id);
     }
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(out, controld_api, node_name, failed_id, data_set);
         if (rc != pcmk_rc_ok) {
             break;  // stop on the first error, but still clean up below
         }
     }
     g_hash_table_destroy(rscs);
     free(interval_ms_s);    // allocated by crm_strdup_printf() above
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation,
                      const char *interval_spec, pe_node_t *node)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (pe__is_guest_or_remote_node(node)) {
         attr_options |= pcmk__node_attr_remote;
     }
     rc = pcmk__node_attr_request_clear(NULL, node->details->uname, rsc_name,
                                        operation, interval_spec, NULL,
                                        attr_options);
     free(rsc_name);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                     const char *host_uname, pe_resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pe_working_set_t *data_set, gboolean force)
 {
     int rc = pcmk_rc_ok;
     pe_node_t *node = NULL;
 
     if (rsc == NULL) {
         return ENXIO;
 
     } else if (rsc->children) {
         GListPtr lpc = NULL;
 
         for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             pe_resource_t *child = (pe_resource_t *) lpc->data;
 
             rc = cli_resource_delete(out, controld_api, host_uname, child, operation,
                                      interval_spec, just_failures, data_set,
                                      force);
             if (rc != pcmk_rc_ok) {
                 return rc;
             }
         }
         return pcmk_rc_ok;
 
     } else if (host_uname == NULL) {
         GListPtr lpc = NULL;
         GListPtr nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && force) {
             nodes = pcmk__copy_node_list(data_set->nodes, false);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pe_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (pe_node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(out, controld_api, node->details->uname,
                                          rsc, operation, interval_spec,
                                          just_failures, data_set, force);
             }
             if (rc != pcmk_rc_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_rc_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         out->err(out, "Unable to clean up %s because node %s not found",
                  rsc->id, host_uname);
         return ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
                  rsc->id, host_uname);
         return EOPNOTSUPP;
     }
 
     if (controld_api == NULL) {
         out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
                  rsc->id, host_uname);
         return pcmk_rc_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up %s failures on %s: %s",
                  rsc->id, host_uname, pcmk_rc_str(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
                                 interval_spec, data_set);
     } else {
         rc = clear_rsc_history(out, controld_api, host_uname, rsc->id, data_set);
     }
     if (rc != pcmk_rc_ok) {
         out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
                  rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                 const char *node_name, const char *operation,
                 const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   display_name);
         return rc;
     }
 
     if (node_name) {
         pe_node_t *node = pe_find_node(data_set->nodes, node_name);
 
         if (node == NULL) {
             out->err(out, "Unknown node: %s", node_name);
             return ENXIO;
         }
         if (pe__is_guest_or_remote_node(node)) {
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation,
                                        interval_spec, NULL, attr_options);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up all failures on %s: %s",
                  display_name, pcmk_rc_str(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(out, controld_api, node_name, NULL,
                                 operation, interval_spec, data_set);
         if (rc != pcmk_rc_ok) {
             out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
                      node_name, pcmk_strerror(rc));
             return rc;
         }
     } else {
         for (GList *iter = data_set->nodes; iter; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
 
             rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
                                     operation, interval_spec, data_set);
             if (rc != pcmk_rc_ok) {
                 out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
                          pcmk_strerror(rc));
                 return rc;
             }
         }
     }
 
     out->info(out, "Cleaned up all resources on %s", display_name);
     return rc;
 }
 
 int
 cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc)
 {
     char *role_s = NULL;
     char *managed = NULL;
     pe_resource_t *parent = uber_parent(rsc);
     int rc = pcmk_rc_no_output;
     resource_checks_t *checks = NULL;
 
     find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
 
     find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
 
     checks = cli_check_resource(rsc, role_s, managed);
 
     if (checks->flags != 0 || checks->lock_node != NULL) {
-        rc = out->message(out, "resource-check", checks);
+        rc = out->message(out, "resource-check-list", checks);
     }
 
     free(role_s);
     free(managed);
     free(checks);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                   const char *host_uname, const char *rsc_id,
                   pe_working_set_t *data_set)
 {
     crm_notice("Failing %s on %s", rsc_id, host_uname);
     return send_lrm_rsc_op(out, controld_api, true, host_uname, rsc_id, data_set);
 }
 
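 /*!
  * \internal
  * \brief Combine a resource's instance parameters and meta-attributes
  *
  * Meta-attribute names are converted with crm_meta_name() (for example,
  * "target-role" becomes "CRM_meta_target_role") before being merged in.
  *
  * \return Newly created table of combined parameters, or NULL if \p rsc
  *         is NULL
  * \note The caller is responsible for destroying the result.
  */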
 static GHashTable *
 generate_resource_params(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
 
     if (!rsc) {
         crm_err("Resource does not exist in config");
         return NULL;
     }
 
     params = crm_str_table_new();
     meta = crm_str_table_new();
     combined = crm_str_table_new();
 
     get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
     get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);
 
     if (params) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
         g_hash_table_destroy(params);
     }
 
     if (meta) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
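 /*!
  * \internal
  * \brief Check whether a resource is running on a given host
  *
  * \param[in] rsc   Resource to check
  * \param[in] host  Node name or ID to check, or NULL to check all nodes
  *
  * \return TRUE if \p rsc is running on \p host (or anywhere, when \p host
  *         is NULL), otherwise FALSE
  */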
 bool resource_is_running_on(pe_resource_t *rsc, const char *host)
 {
     bool found = TRUE;
     GListPtr hIter = NULL;
     GListPtr hosts = NULL;
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if(strcmp(host, node->details->uname) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         } else if(strcmp(host, node->details->id) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if(host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = FALSE;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = FALSE;
     }
 
   done:
 
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pe_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
static void dump_list(GList *items, const char *tag)
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(pcmk__output_t *out, GList *items, const char *tag)
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         out->info(out, "%s%s", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] xml        XML to use as input
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns ENOKEY ("Required key not available") if that fails,
  *       but perhaps pcmk_rc_schema_validation would be better in that case.
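 *
 * \par Example
 * \code
 *     // Hypothetical caller, for illustration only; compare
 *     // update_working_set_from_cib() below for the real equivalent
 *     xmlNode *cib_xml_copy = NULL;
 *     int rc = cib->cmds->query(cib, NULL, &cib_xml_copy,
 *                               cib_scope_local|cib_sync_call);
 *
 *     rc = pcmk_legacy2rc(rc);
 *     if (rc == pcmk_rc_ok) {
 *         rc = update_working_set_xml(data_set, &cib_xml_copy);
 *     }
 * \endcode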
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
 * \param[in] out        Output object
 * \param[in] data_set   Working set instance to update
 * \param[in] cib        Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
                             cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc = pcmk_rc_ok;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not upgrade the current CIB XML");
         free_xml(cib_xml_copy);
         return rc;
     }
 
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set,
                bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc = pcmk_rc_ok;
 
     pe_reset_working_set(data_set);
     rc = update_working_set_from_cib(out, data_set, cib);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
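    /* To simulate, copy the current CIB into a shadow instance, run the
     * scheduler and a simulated transition against it, then re-read the
     * projected cluster state from the shadow
     */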
     if(simulate) {
         pid = pcmk__getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             out->err(out, "Could not create shadow cib: '%s'", pid);
             rc = ENXIO;
             goto cleanup;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         pcmk__schedule_actions(data_set, data_set->input, NULL);
         run_simulation(data_set, shadow_cib, NULL, TRUE);
         rc = update_dataset(out, shadow_cib, data_set, FALSE);
 
     } else {
         cluster_status(data_set);
     }
 
   cleanup:
     /* Do not free data_set->input here, we need rsc->xml to be valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
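// \return Maximum configured stop timeout in milliseconds for rsc and its children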
 static int
max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc)
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
         pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
 
         max_delay = value? (int) crm_parse_ll(value, NULL) : -1;
         pe_free_action(stop);
    }

    return max_delay;
 }
 
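// \return Seconds to wait for the longest stop timeout among resources, plus a 5-second buffer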
 static int
max_delay_in(pe_working_set_t * data_set, GList *resources)
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
 
     return 5 + (max_delay / 1000);
 }
 
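/* We are still waiting on starts while the delta list is non-empty, or while
 * the resource being restarted has not itself come back up on the host
 */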
 #define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (resource_is_running_on((r), (h)) == FALSE))
 
/*!
 * \internal
 * \brief Restart a resource (on a particular host if requested)
 *
 * \param[in] out                 Output object
 * \param[in] rsc                 The resource to restart
 * \param[in] host                The host to restart the resource on (or NULL
 *                                for all)
 * \param[in] move_lifetime       If not NULL, how long any ban used to stop
 *                                the resource should remain in effect
 * \param[in] timeout_ms          Consider failed if actions do not complete in
 *                                this time (specified in milliseconds, but a
 *                                two-second granularity is actually used; if 0,
 *                                a timeout will be calculated based on the
 *                                resource timeout)
 * \param[in] cib                 Connection to the CIB manager
 * \param[in] cib_options         Group of enum cib_call_options flags to use
 *                                with CIB calls
 * \param[in] promoted_role_only  Whether to limit the restart to the promoted
 *                                (master) role
 * \param[in] force               Whether to force the operation
 *
 * \return Standard Pacemaker return code
 */
 int
 cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host,
                      const char *move_lifetime, int timeout_ms, cib_t *cib,
                      int cib_options, gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = FALSE;
     char *rsc_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t *data_set = NULL;
 
     if(resource_is_running_on(rsc, host) == FALSE) {
         const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
         if(host) {
             out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
         } else {
             out->err(out, "%s is not running anywhere and so cannot be restarted", id);
         }
         return ENXIO;
     }
 
     rsc_id = strdup(rsc->id);
     if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
         stop_via_ban = TRUE;
     }
 
    /*
     * The plan:
     *
     * 1. Grab the full CIB.
     * 2. Determine the originally active resources.
     * 3. Disable the resource (or ban it from the host).
     * 4. Poll the CIB, watching for the affected resources to stop. Without
     *    --timeout, calculate the stop timeout for each step and wait that
     *    long. If we hit --timeout or the service timeout, re-enable (or
     *    un-ban), report failure, and indicate which resources we could not
     *    take down.
     * 5. If everything stopped, re-enable (or un-ban) the resource.
     * 6. Poll the CIB, watching for the affected resources to start. Without
     *    --timeout, calculate the start timeout for each step and wait that
     *    long. If we hit --timeout or the service timeout, report a
     *    (different) failure and indicate which resources we could not bring
     *    back up.
     * 7. Report success.
     *
     * Optimizations:
     * - Use constraints to determine an ordered list of affected resources
     * - Allow a --no-deps option (aka. --force-restart)
     */
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         crm_perror(LOG_ERR, "Could not allocate working set");
         rc = ENOMEM;
         goto done;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     rc = update_dataset(out, cib, data_set, FALSE);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
         goto done;
     }
 
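    /* Two copies of the same list: restart_target_active is the goal we must
     * get back to, while current_active is refreshed as the restart progresses
     */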
     restart_target_active = get_active_resources(host, data_set->resources);
     current_active = get_active_resources(host, data_set->resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         out->quiet = true;
         rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib,
                               cib_options, promoted_role_only);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it only makes any difference if it's Slave).
          */
         char *lookup_id = clone_strip(rsc->id);
 
         find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         free(lookup_id);
         rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            RSC_STOPPED, FALSE, cib, cib_options,
                                            data_set, force);
     }
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         if (current_active) {
             g_list_free_full(current_active, free);
         }
         if (restart_target_active) {
             g_list_free_full(restart_target_active, free);
         }
         goto done;
     }
 
     rc = update_dataset(out, cib, data_set, TRUE);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not determine which resources would be stopped");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set->resources);
     dump_list(target_active, "Target");
 
     list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
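    /* step_timeout_s is a number of sleep_interval-second polling iterations,
     * not a number of seconds
     */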
     step_timeout_s = timeout / sleep_interval;
     while (list_delta != NULL) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(out, cib, data_set, FALSE);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were stopped");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             out->info(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            cib_options, data_set, force);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
                                            cib_options, data_set, force);
     }
 
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         goto done;
     }
 
     if (target_active) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
     list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(out, cib, data_set, FALSE);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were started");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             out->info(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_rc_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
     } else if (orig_target_role) {
         cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
                                       FALSE, cib, cib_options, data_set, force);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, cib, cib_options,
                                       data_set, force);
     }
 
 done:
     if (list_delta) {
         g_list_free(list_delta);
     }
     if (current_active) {
         g_list_free_full(current_active, free);
     }
     if (target_active && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active) {
         g_list_free_full(restart_target_active, free);
     }
     free(rsc_id);
     pe_free_working_set(data_set);
     return rc;
 }
 
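/* Check whether an action is still required and runnable (i.e. whether the
 * cluster has not finished with it yet); notifications are ignored
 */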
 static inline bool action_is_pending(pe_action_t *action)
 {
     if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
         || !pcmk_is_set(action->flags, pe_action_runnable)
         || pcmk__str_eq("notify", action->task, pcmk__str_casei)) {
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Return TRUE if any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return TRUE if any actions in the list are pending, FALSE otherwise
  */
 static bool
 actions_are_pending(GListPtr actions)
 {
     GListPtr action;
 
     for (action = actions; action != NULL; action = action->next) {
         pe_action_t *a = (pe_action_t *)action->data;
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 static void
 print_pending_actions(pcmk__output_t *out, GListPtr actions)
 {
     GListPtr action;
 
     out->info(out, "Pending actions:");
     for (action = actions; action != NULL; action = action->next) {
         pe_action_t *a = (pe_action_t *) action->data;
 
         if (!action_is_pending(a)) {
             continue;
         }
 
         if (a->node) {
             out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, a->node->details->uname);
         } else {
             out->info(out, "\tAction %d: %s", a->id, a->uuid);
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but one-second granularity
  *                       is actually used; if 0, a default will be used)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  */
 int
 wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
 {
     pe_working_set_t *data_set = NULL;
    int rc = pcmk_rc_error; // anything but pcmk_rc_ok, so we don't sleep before the first check
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
     bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         return ENOMEM;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
 
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
         } else {
             print_pending_actions(out, data_set->actions);
             pe_free_working_set(data_set);
             return ETIME;
         }
         if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         pe_reset_working_set(data_set);
         rc = update_working_set_from_cib(out, data_set, cib);
         if (rc != pcmk_rc_ok) {
             pe_free_working_set(data_set);
             return rc;
         }
         pcmk__schedule_actions(data_set, data_set->input, NULL);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(data_set->config_hash,
                                                          "dc-version");
 
             if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
                 out->info(out, "warning: wait option may not work properly in "
                           "mixed-version cluster");
                 printed_version_warning = TRUE;
             }
         }
 
     } while (actions_are_pending(data_set->actions));
 
     pe_free_working_set(data_set);
     return rc;
 }
 
 crm_exit_t
 cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                  const char *rsc_class, const char *rsc_prov,
                                  const char *rsc_type, const char *action,
                                  GHashTable *params, GHashTable *override_hash,
                                  int timeout_ms, int resource_verbose, gboolean force)
 {
     GHashTable *params_copy = NULL;
     crm_exit_t exit_code = CRM_EX_OK;
     svc_action_t *op = NULL;
 
     if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         out->err(out, "Sorry, the %s option doesn't support %s resources yet",
                  action, rsc_class);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     /* If no timeout was provided, grab the default. */
     if (timeout_ms == 0) {
         timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
     }
 
     /* add meta_timeout env needed by some resource agents */
     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                         crm_strdup_printf("%d", timeout_ms));
 
     /* add crm_feature_set env needed by some resource agents */
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
    /* resources_action_create() frees the params hash table it's passed, but
     * we may need to reuse it in a second call to resources_action_create().
     * Thus we'll make a copy here, so the copy gets freed and the original
     * remains available for reuse.
     */
     params_copy = crm_str_table_dup(params);
 
     op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
                                  timeout_ms, params_copy, 0);
     if (op == NULL) {
         /* Re-run with stderr enabled so we can display a sane error message */
         crm_enable_stderr(TRUE);
         params_copy = crm_str_table_dup(params);
         op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
                                      timeout_ms, params_copy, 0);
 
        /* Callers of cli_resource_execute() expect the params hash table to
         * be freed. That function uses this one, so for consistency we free
         * the hash table here too.
         */
         g_hash_table_destroy(params);
 
         /* We know op will be NULL, but this makes static analysis happy */
         services_action_free(op);
         crm_exit(CRM_EX_DATAERR);
         return exit_code; // Never reached, but helps static analysis
     }
 
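    /* Relay the requested verbosity to the agent via its environment */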
     setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
     if(resource_verbose > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     /* A resource agent using the standard ocf-shellfuncs library will not print
      * messages to stderr if it doesn't have a controlling terminal (e.g. if
      * crm_resource is called via script or ssh). This forces it to do so.
      */
     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
 
     if (override_hash) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, override_hash);
         while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
             out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'",
                       rsc_name, name, value);
             g_hash_table_replace(op->params, strdup(name), strdup(value));
         }
     }
 
     if (services_action_sync(op)) {
         exit_code = op->rc;
 
         if (op->status == PCMK_LRM_OP_DONE) {
             out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)",
                       action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
                       services_ocf_exitcode_str(op->rc), op->rc);
         } else {
             out->info(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)",
                       action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
                       services_lrm_status_str(op->status), op->status);
         }
 
        /* Hide output for validate-all unless running verbosely */
         if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei))
             goto done;
 
         if (op->stdout_data || op->stderr_data) {
             out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data);
         }
     } else {
         exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc;
     }
 
 done:
     services_action_free(op);
     /* See comment above about why we free params here. */
     g_hash_table_destroy(params);
     return exit_code;
 }
 
 crm_exit_t
 cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc,
                      const char *requested_name, const char *rsc_action,
                      GHashTable *override_hash, int timeout_ms,
                      cib_t * cib, pe_working_set_t *data_set,
                      int resource_verbose, gboolean force)
 {
     crm_exit_t exit_code = CRM_EX_OK;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     const char *action = NULL;
     GHashTable *params = NULL;
 
     if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
         action = "validate-all";
 
     } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
         action = "monitor";
 
     } else if (pcmk__str_eq(rsc_action, "force-stop", pcmk__str_casei)) {
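        // Map "force-stop" to "stop" by skipping the "force-" prefix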
         action = rsc_action+6;
 
     } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                                     "force-promote", NULL)) {
        action = rsc_action+6; // likewise, skip the "force-" prefix
 
         if(pe_rsc_is_clone(rsc)) {
             GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set);
             if(rscs != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          action, rsc->id);
                 out->err(out, "Try setting target-role=Stopped first or specifying "
                          "the force option");
                 return CRM_EX_UNSAFE;
             }
         }
 
     } else {
         action = rsc_action;
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if(rsc->variant == pe_group) {
         out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
     params = generate_resource_params(rsc, data_set);
 
     if (timeout_ms == 0) {
         timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
     }
 
     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
 
     exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action,
                                                  params, override_hash, timeout_ms,
                                                  resource_verbose, force);
     return exit_code;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id,
                   const char *host_name, const char *move_lifetime, cib_t *cib,
                   int cib_options, pe_working_set_t *data_set,
                   gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     unsigned int count = 0;
     pe_node_t *current = NULL;
     pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = FALSE;
 
     if (dest == NULL) {
         return pcmk_rc_node_unknown;
     }
 
     if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pe_resource_t *p = uber_parent(rsc);
 
         if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
            out->info(out, "Using parent '%s' for move instead of '%s'.", p->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             out->info(out, "Ignoring master option: %s is not promotable", rsc_id);
             promoted_role_only = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         GListPtr iter = NULL;
         unsigned int master_count = 0;
         pe_node_t *master_node = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
                 master_node = pe__current_node(child);
                 master_count++;
             }
         }
         if (promoted_role_only || master_count) {
             count = master_count;
             current = master_node;
         }
 
     }
 
     if (count > 1) {
         if (pe_rsc_is_clone(rsc)) {
             current = NULL;
         } else {
             return pcmk_rc_multiple;
         }
     }
 
     if (current && (current->details == dest->details)) {
         cur_is_dest = TRUE;
         if (force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, promoted_role_only?"promoted":"active", dest->details->uname);
         } else {
             return pcmk_rc_already;
         }
     }
 
     /* Clear any previous prefer constraints across all nodes. */
     cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, FALSE, force);
 
     /* Clear any previous ban constraints on 'dest'. */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
                        cib_options, TRUE, force);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                              cib, cib_options, promoted_role_only);
 
    crm_trace("%s%s now prefers node %s%s",
              rsc->id, promoted_role_only?" (master)":"", dest->details->uname, force?" (forced)":"");
 
    /* Only ban the previous location if the current location is not the
     * destination. It is possible to use -M to enforce a location without
     * regard to where the resource is currently located.
     */
     if(force && (cur_is_dest == FALSE)) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
                                    NULL, cib, cib_options, promoted_role_only);
 
         } else if(count > 1) {
             out->info(out, "Resource '%s' is currently %s in %d locations. "
                       "One may now move to %s",
                       rsc_id, (promoted_role_only? "promoted" : "active"),
                       count, dest->details->uname);
             out->info(out, "To prevent '%s' from being %s at a specific location, "
                       "specify a node.",
                       rsc_id, (promoted_role_only? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }