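This is the expected-output fixture for Pacemaker's crm_mon regression tests. Each block between the =#=#=#= Begin test: and =#=#=#= End test: sentinels records the output that one crm_mon invocation must produce; for the XML cases, the exact command line is preserved in the request attribute of the pacemaker-result element (for example crm_mon --output-as=xml, or crm_mon -1 --output-as=xml --group-by-node). As a minimal sketch of checking a single case by hand, one could rerun the recorded command and diff it against the stored expectation. This is an illustrative assumption, not the actual test harness: the real driver runs many cases in sequence and normalizes volatile fields such as timestamps and version strings before comparing, and the expected-fragment.xml path below is a hypothetical extract of this .exp file, not a file that ships with the project.

    # Rerun one command recorded in the fixture's request attribute
    # and capture its XML output to a scratch file.
    crm_mon --output-as=xml > /tmp/crm_mon-actual.xml

    # Compare against the corresponding expected block; in practice the
    # harness would first blank out the time="" and version="" attributes.
    diff -u expected-fragment.xml /tmp/crm_mon-actual.xml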
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index edcdda6b34..b7a90934ce 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3118 +1,3118 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --exclude=nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
* ping (ocf::pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* 1 (ocf::pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf::heartbeat:IPaddr): Active cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
* mysql-proxy (lsb:mysql-proxy): Started
* Node cluster02: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* dummy (ocf::pacemaker:Dummy): Started
* Public-IP (ocf::heartbeat:IPaddr): Started
* Email (lsb:exim): Started
* mysql-proxy (lsb:mysql-proxy): Started
* GuestNode httpd-bundle-0@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-1@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::heartbeat:IPaddr): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:ping): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
<pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --group-by-node">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</node>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</node>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as xml --include=all --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --node=even-nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</clone>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes/>
<resources>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Fencing">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=exim-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping-clone">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources/>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="2">
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf::pacemaker:remote): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[1]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[2]
* httpd (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"/>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by clone name in cloned group
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by group name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by primitive name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
<pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf::pacemaker:Dummy): Started cluster02
* dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
<pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="13" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
</resource>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
<group id="partially-active-group" number_resources="2" managed="true" disabled="false">
<resource id="dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</resources>
<node_attributes>
<node name="cluster01">
- <attribute name="pingd" value="1000"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="httpd-bundle-0">
<resource_history id="httpd" orphan="false" migration-threshold="1000000">
<operation_history call="1" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf::pacemaker:Dummy): Started cluster02
* dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (ocf::pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf::heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
<pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="13" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
- <attribute name="pingd" value="1000"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources, filtered by node
=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping] (unmanaged):
* ping (ocf::pacemaker:ping): Started cluster02 (unmanaged)
* ping (ocf::pacemaker:ping): Started cluster01 (unmanaged)
* Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
* dummy (ocf::pacemaker:Dummy): Started cluster02 (unmanaged)
* Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (unmanaged) (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
* Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped (unmanaged)
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped (unmanaged)
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped (unmanaged)
* Resource Group: exim-group (unmanaged):
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02 (unmanaged)
* Email (lsb:exim): Started cluster02 (unmanaged)
* Clone Set: mysql-clone-group [mysql-group] (unmanaged):
* Resource Group: mysql-group:0 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
* Resource Group: mysql-group:1 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
=#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
* Passed: crm_mon - Text output of all resources with maintenance-mode enabled
diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c
index ce3e47c77a..1a085632e4 100644
--- a/tools/crm_mon_print.c
+++ b/tools/crm_mon_print.c
@@ -1,997 +1,997 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <glib.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifndef PCMK__CONFIG_H
# define PCMK__CONFIG_H
# include <config.h>
#endif
#include <crm/cib/util.h>
#include <crm/common/curses_internal.h>
#include <crm/common/iso8601_internal.h>
#include <crm/common/xml.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/pe_types.h>
#include <crm/stonith-ng.h>
#include <crm/common/internal.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/fencing/internal.h>
#include "crm_mon.h"
static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops,
GListPtr op_list);
static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *node_state, gboolean operations,
unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc);
static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list,
const char *attrname, int *expected_score);
static void print_node_attribute(gpointer name, gpointer user_data);
static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean operations, unsigned int mon_ops,
GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean print_spacer);
static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, const char *prefix,
GListPtr only_rsc, gboolean print_spacer);
static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, GListPtr only_node,
GListPtr only_rsc, gboolean print_spacer);
static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
static GListPtr
build_uname_list(pe_working_set_t *data_set, const char *s) {
GListPtr unames = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
/* Either nothing was given (NULL) or '*' was given explicitly; both mean
* "all nodes". A literal '*' would otherwise fall into the
* pe__unames_with_tag branch below and come back as an empty list, so
* catch it here instead.
*/
unames = g_list_prepend(unames, strdup("*"));
} else {
pe_node_t *node = pe_find_node(data_set->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
* singleton list containing just that uname.
*/
unames = g_list_prepend(unames, strdup(s));
} else {
/* The given string was not a valid uname, so it is either a tag or a
* typo. In the first case, return the unames of all nodes carrying
* that tag; in the second, pe__unames_with_tag() returns NULL and
* nothing gets displayed.
*/
unames = pe__unames_with_tag(data_set, s);
}
}
return unames;
}
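/* Illustrative behavior, assuming hypothetical node names: with nodes
* "cluster01" and "cluster02", and "cluster01" carrying the tag "web":
*   build_uname_list(data_set, NULL)        -> [ "*" ] (match everything)
*   build_uname_list(data_set, "cluster01") -> [ "cluster01" ]
*   build_uname_list(data_set, "web")       -> [ "cluster01" ]
*   build_uname_list(data_set, "bogus")     -> NULL (nothing displayed)
*/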
static GListPtr
build_rsc_list(pe_working_set_t *data_set, const char *s) {
GListPtr resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
pe_find_renamed|pe_find_any);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
* on a specific instance of a cloned resource. Put that exact string
* into the filter list. Otherwise, use the printable ID of whatever
* resource was found that matches what was asked for.
*/
if (strstr(s, ":") != NULL) {
resources = g_list_prepend(resources, strdup(rsc->id));
} else {
resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
}
} else {
/* The given string was not a valid resource name. It's either
* a tag or it's a typo or something. See build_uname_list for
* more detail.
*/
resources = pe__rscs_with_tag(data_set, s);
}
}
return resources;
}
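/* Illustrative behavior, assuming a hypothetical clone with instances
* "ping:0" and "ping:1": passing "ping:0" (a name containing a colon)
* filters on that exact instance, while passing "ping" filters on the
* printable ID of the matched resource; an unrecognized string is
* treated as a possible tag, as in build_uname_list().
*/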
static int
failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc, time_t *last_failure) {
return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default,
NULL, data_set)
: 0;
}
static GListPtr
get_operation_list(xmlNode *rsc_entry) {
GListPtr op_list = NULL;
xmlNode *rsc_op = NULL;
for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
rsc_op = pcmk__xe_next(rsc_op)) {
const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(rsc_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
int op_rc_i = crm_parse_int(op_rc, "0");
/* Display 0-interval monitors as "probe" */
if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Ignore notifies and some probes */
if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
continue;
}
if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
op_list = g_list_append(op_list, rsc_op);
}
}
op_list = g_list_sort(op_list, sort_op_by_callid);
return op_list;
}
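/* Note: probes that returned rc 7 ("not running") are filtered out above
* because they only establish that the resource was inactive on this node,
* and the surviving operations are sorted by call ID so that history
* prints in execution order.
*/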
/*!
* \internal
* \brief Print resource operation/failure history
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] node Node that ran this resource.
* \param[in] rsc_entry Root of XML tree describing resource status.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] op_list A list of operations to print.
*/
static int
print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node,
xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list)
{
GListPtr gIter = NULL;
int rc = pcmk_rc_no_output;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
/* Print each operation */
for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *xml_op = (xmlNode *) gIter->data;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(xml_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
int op_rc_i = crm_parse_int(op_rc, "0");
/* Display 0-interval monitors as "probe" */
if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* If this is the first printed operation, print heading for resource */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
int failcount = failure_count(data_set, node, rsc, &last_failure);
out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE);
rc = pcmk_rc_ok;
}
/* Print the operation */
out->message(out, "op-history", xml_op, task, interval_ms_s,
op_rc_i, pcmk_is_set(mon_ops, mon_op_print_timing));
}
/* Free the list we created (no need to free the individual items) */
g_list_free(op_list);
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
/*!
* \internal
* \brief Print node operation/failure history
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] node Node whose history will be printed.
* \param[in] node_state Root of XML tree describing node status.
* \param[in] operations Whether to print operations or just failcounts.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] only_node List of node unames to limit output to.
* \param[in] only_rsc List of resource IDs to limit output to.
*/
static int
print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *node_state, gboolean operations,
unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc)
{
xmlNode *lrm_rsc = NULL;
xmlNode *rsc_entry = NULL;
int rc = pcmk_rc_no_output;
lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
/* Print history of each of the node's resources */
for (rsc_entry = pcmk__xe_first_child(lrm_rsc); rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry)) {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
continue;
}
/* We can't use is_filtered here to filter group resources. For is_filtered,
* we have to decide whether to check the parent or not. If we check the
* parent, all elements of a group will always be printed because that's how
* is_filtered works for groups. If we do not check the parent, sometimes
* this will filter everything out.
*
* For other resource types, is_filtered is okay.
*/
if (uber_parent(rsc)->variant == pe_group) {
if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) &&
!pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
continue;
}
} else {
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
continue;
}
}
if (operations == FALSE) {
time_t last_failure = 0;
int failcount = failure_count(data_set, node, rsc, &last_failure);
if (failcount <= 0) {
continue;
}
if (rc == pcmk_rc_no_output) {
rc = pcmk_rc_ok;
out->message(out, "node", node, get_resource_display_options(mon_ops),
FALSE, NULL,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node),
only_node, only_rsc);
}
out->message(out, "resource-history", rsc, rsc_id, FALSE,
failcount, last_failure, FALSE);
} else {
GListPtr op_list = get_operation_list(rsc_entry);
if (op_list == NULL) {
continue;
}
if (rc == pcmk_rc_no_output) {
rc = pcmk_rc_ok;
out->message(out, "node", node, get_resource_display_options(mon_ops),
FALSE, NULL,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node),
only_node, only_rsc);
}
print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
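/* Example of the group special case above, using a hypothetical group
* "mygroup" with member "mygroup-ip": filtering with only_rsc set to
* either "mygroup" or "mygroup-ip" must match, which is why both the
* member's printable ID and its uber_parent's ID are checked.
*/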
/*!
* \internal
* \brief Determine whether extended information about an attribute should be added.
*
* \param[in] out The output functions structure.
* \param[in] node Node that ran this resource.
* \param[in] rsc_list The list of resources for this node.
* \param[in] attrname The attribute to find.
* \param[out] expected_score The expected value for this attribute.
*
* \return TRUE if extended information should be printed, FALSE otherwise
* \note Currently, extended information is only supported for ping/pingd
* resources, for which a message will be printed if connectivity is lost
* or degraded.
*/
static gboolean
add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list,
const char *attrname, int *expected_score)
{
GListPtr gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
if (rsc->children != NULL) {
if (add_extra_info(out, node, rsc->children, attrname, expected_score)) {
return TRUE;
}
}
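/* Skip (rather than stop at) resources that are not ping/pingd, so any
* ping resource appearing later in rsc_list is still considered
*/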
if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
- return FALSE;
+ continue;
}
name = g_hash_table_lookup(rsc->parameters, "name");
if (name == NULL) {
name = "pingd";
}
/* Use this resource only if its configured attribute name matches the one requested */
if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
int host_list_num = 0;
/* int value = crm_parse_int(attrvalue, "0"); */
const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
if (hosts) {
char **host_list = g_strsplit(hosts, " ", 0);
host_list_num = g_strv_length(host_list);
g_strfreev(host_list);
}
/* If no multiplier is configured, fall back to the pingd default of 1 */
*expected_score = host_list_num * crm_parse_int(multiplier, "1");
return TRUE;
}
}
return FALSE;
}
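/* Worked example with hypothetical parameters: a ping resource configured
* with host_list="192.168.122.1 192.168.122.2 192.168.122.3" and
* multiplier="1000" yields *expected_score = 3 * 1000 = 3000, the value
* the attribute should hold when all three hosts are reachable.
*/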
/* Structure for passing several pieces of user data through g_list_foreach() */
struct mon_attr_data {
pcmk__output_t *out;
pe_node_t *node;
};
static void
print_node_attribute(gpointer name, gpointer user_data)
{
const char *value = NULL;
int expected_score = 0;
gboolean add_extra = FALSE;
struct mon_attr_data *data = (struct mon_attr_data *) user_data;
value = pe_node_attribute_raw(data->node, name);
add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc,
name, &expected_score);
/* Print attribute name and value */
data->out->message(data->out, "node-attribute", name, value, add_extra,
expected_score);
}
/*!
* \internal
* \brief Print history for all nodes.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] operations Whether to print operations or just failcounts.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] only_node List of node unames to limit output to.
* \param[in] only_rsc List of resource IDs to limit output to.
* \param[in] print_spacer Whether to print a blank line before this section.
*/
static int
print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean operations, unsigned int mon_ops, GListPtr only_node,
GListPtr only_rsc, gboolean print_spacer)
{
xmlNode *node_state = NULL;
xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
int rc = pcmk_rc_no_output;
if (xmlChildElementCount(cib_status) == 0) {
return rc;
}
/* Print each node in the CIB status */
for (node_state = pcmk__xe_first_child(cib_status); node_state != NULL;
node_state = pcmk__xe_next(node_state)) {
pe_node_t *node;
if (!pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
continue;
}
node = pe_find_node_id(data_set->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
}
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, operations ? "Operations" : "Migration Summary");
print_node_history(out, data_set, node, node_state, operations, mon_ops,
only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
/*!
* \internal
* \brief Print all tickets.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] print_spacer Whether to print a blank line before this section.
*/
static int
print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean print_spacer)
{
GHashTableIter iter;
gpointer key, value;
if (g_hash_table_size(data_set->tickets) == 0) {
return pcmk_rc_no_output;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
/* Print section heading */
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
g_hash_table_iter_init(&iter, data_set->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
pe_ticket_t *ticket = (pe_ticket_t *) value;
out->message(out, "ticket", ticket);
}
/* Close section */
out->end_list(out);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Print section for negative location constraints
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] prefix ID prefix to filter results by.
* \param[in] only_rsc List of resource IDs to limit output to.
* \param[in] print_spacer Whether to print a blank line before this section.
*/
static int
print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, const char *prefix, GListPtr only_rsc,
gboolean print_spacer)
{
GListPtr gIter, gIter2;
int rc = pcmk_rc_no_output;
/* Print each ban */
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
if (prefix != NULL && !g_str_has_prefix(location->id, prefix))
continue;
if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) &&
!pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) {
continue;
}
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
pe_node_t *node = (pe_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
out->message(out, "ban", node, location,
pcmk_is_set(mon_ops, mon_op_print_clone_detail));
}
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
/*!
* \internal
* \brief Print node attributes section
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] only_node List of node unames to limit output to.
* \param[in] only_rsc List of resource IDs to limit output to.
* \param[in] print_spacer Whether to print a blank line before this section.
*/
static int
print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, GListPtr only_node,
GListPtr only_rsc, gboolean print_spacer)
{
GListPtr gIter = NULL;
int rc = pcmk_rc_no_output;
/* Unpack all resource parameters (it would be more efficient to do this
* only when needed for the first time in add_extra_info())
*/
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
crm_mon_get_parameters(gIter->data, data_set);
}
/* Display each node's attributes */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
struct mon_attr_data data;
data.out = out;
data.node = (pe_node_t *) gIter->data;
if (data.node && data.node->details && data.node->details->online) {
GList *attr_list = NULL;
GHashTableIter iter;
gpointer key, value;
g_hash_table_iter_init(&iter, data.node->details->attrs);
while (g_hash_table_iter_next (&iter, &key, &value)) {
attr_list = append_attr_list(attr_list, key);
}
if (attr_list == NULL) {
continue;
}
if (!pcmk__str_in_list(only_node, data.node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
out->message(out, "node", data.node, get_resource_display_options(mon_ops),
FALSE, NULL,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node),
only_node, only_rsc);
g_list_foreach(attr_list, print_node_attribute, &data);
g_list_free(attr_list);
out->end_list(out);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
/*!
* \internal
* \brief Print a section for failed actions
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] only_node List of node unames to limit output to.
* \param[in] only_rsc List of resource IDs to limit output to.
* \param[in] print_spacer Whether to print a blank line before this section.
*/
static int
print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
GListPtr only_node, GListPtr only_rsc, gboolean print_spacer)
{
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
const char *id = NULL;
if (xmlChildElementCount(data_set->failed) == 0) {
return rc;
}
for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
char *rsc = NULL;
if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) {
continue;
}
id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
if (parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL) == FALSE) {
continue;
}
if (!pcmk__str_in_list(only_rsc, rsc)) {
free(rsc);
continue;
}
free(rsc);
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
out->message(out, "failed-action", xml_op);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
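/* Latch retcode to pcmk_rc_ok once any section has produced output, so
* later sections know (via rc == pcmk_rc_ok) to print a leading spacer.
*/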
#define CHECK_RC(retcode, retval) \
if (retval == pcmk_rc_ok) { \
retcode = pcmk_rc_ok; \
}
/*!
* \internal
* \brief Top-level printing function for text/curses output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
* \param[in] only_node Node uname or tag to limit output to.
* \param[in] only_rsc Resource ID or tag to limit output to.
*/
void
print_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
unsigned int show, char *prefix, char *only_node, char *only_rsc)
{
GListPtr unames = NULL;
GListPtr resources = NULL;
unsigned int print_opts = get_resource_display_options(mon_ops);
int rc = pcmk_rc_no_output;
CHECK_RC(rc, out->message(out, "cluster-summary", data_set,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(show, mon_show_stack),
pcmk_is_set(show, mon_show_dc),
pcmk_is_set(show, mon_show_times),
pcmk_is_set(show, mon_show_counts),
pcmk_is_set(show, mon_show_options)));
unames = build_uname_list(data_set, only_node);
resources = build_rsc_list(data_set, only_rsc);
if (pcmk_is_set(show, mon_show_nodes) && unames) {
PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok);
CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
resources, print_opts,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node)));
}
/* Print resources section, if needed */
if (pcmk_is_set(show, mon_show_resources)) {
CHECK_RC(rc, out->message(out, "resource-list", data_set, print_opts,
pcmk_is_set(mon_ops, mon_op_group_by_node),
pcmk_is_set(mon_ops, mon_op_inactive_resources),
pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
resources, rc == pcmk_rc_ok));
}
/* print Node Attributes section if requested */
if (pcmk_is_set(show, mon_show_attributes)) {
CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, resources,
rc == pcmk_rc_ok));
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (pcmk_is_set(show, mon_show_operations)
|| pcmk_is_set(show, mon_show_failcounts)) {
CHECK_RC(rc, print_node_summary(out, data_set,
pcmk_is_set(show, mon_show_operations),
mon_ops, unames, resources,
(rc == pcmk_rc_ok)));
}
/* If there were any failed actions, print them */
if (pcmk_is_set(show, mon_show_failures)
&& xml_has_children(data_set->failed)) {
CHECK_RC(rc, print_failed_actions(out, data_set, unames, resources,
rc == pcmk_rc_ok));
}
/* Print failed stonith actions */
if (pcmk_is_set(show, mon_show_fence_failed)
&& pcmk_is_set(mon_ops, mon_op_fence_history)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "failed-fencing-history", stonith_history, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
}
/* Print tickets if requested */
if (pcmk_is_set(show, mon_show_tickets)) {
CHECK_RC(rc, print_cluster_tickets(out, data_set, rc == pcmk_rc_ok));
}
/* Print negative location constraints if requested */
if (pcmk_is_set(show, mon_show_bans)) {
CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, resources,
rc == pcmk_rc_ok));
}
/* Print stonith history */
if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
if (pcmk_is_set(show, mon_show_fence_worked)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "fencing-history", hp, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
} else if (pcmk_is_set(show, mon_show_fence_pending)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
if (hp) {
CHECK_RC(rc, out->message(out, "pending-fencing-actions", hp, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
}
}
g_list_free_full(unames, free);
g_list_free_full(resources, free);
}
/*!
* \internal
* \brief Top-level printing function for XML output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] history_rc Result of the earlier fencing history query.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
* \param[in] only_node Node uname or tag to limit output to.
* \param[in] only_rsc Resource ID or tag to limit output to.
*/
void
print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
crm_exit_t history_rc, stonith_history_t *stonith_history,
unsigned int mon_ops, unsigned int show, char *prefix,
char *only_node, char *only_rsc)
{
GListPtr unames = NULL;
GListPtr resources = NULL;
unsigned int print_opts = get_resource_display_options(mon_ops);
out->message(out, "cluster-summary", data_set,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(show, mon_show_stack),
pcmk_is_set(show, mon_show_dc),
pcmk_is_set(show, mon_show_times),
pcmk_is_set(show, mon_show_counts),
pcmk_is_set(show, mon_show_options));
unames = build_uname_list(data_set, only_node);
resources = build_rsc_list(data_set, only_rsc);
/*** NODES ***/
if (pcmk_is_set(show, mon_show_nodes)) {
out->message(out, "node-list", data_set->nodes, unames,
resources, print_opts,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node));
}
/* Print resources section, if needed */
if (pcmk_is_set(show, mon_show_resources)) {
out->message(out, "resource-list", data_set, print_opts,
pcmk_is_set(mon_ops, mon_op_group_by_node),
pcmk_is_set(mon_ops, mon_op_inactive_resources),
FALSE, FALSE, unames, resources, FALSE);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(show, mon_show_attributes)) {
print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (pcmk_is_set(show, mon_show_operations)
|| pcmk_is_set(show, mon_show_failcounts)) {
print_node_summary(out, data_set,
pcmk_is_set(show, mon_show_operations),
mon_ops, unames, resources, FALSE);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(show, mon_show_failures)
&& xml_has_children(data_set->failed)) {
print_failed_actions(out, data_set, unames, resources, FALSE);
}
/* Print stonith history */
if (pcmk_is_set(show, mon_show_fencing_all)
&& pcmk_is_set(mon_ops, mon_op_fence_history)) {
out->message(out, "full-fencing-history", history_rc, stonith_history,
unames, pcmk_is_set(mon_ops, mon_op_fence_full_history),
FALSE);
}
/* Print tickets if requested */
if (pcmk_is_set(show, mon_show_tickets)) {
print_cluster_tickets(out, data_set, FALSE);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(show, mon_show_bans)) {
print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
}
g_list_free_full(unames, free);
g_list_free_full(resources, free);
}
/*!
* \internal
* \brief Top-level printing function for HTML output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
* \param[in] only_node Node uname or tag to limit output to.
* \param[in] only_rsc Resource ID or tag to limit output to.
*/
int
print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
unsigned int show, char *prefix, char *only_node,
char *only_rsc)
{
GListPtr unames = NULL;
GListPtr resources = NULL;
unsigned int print_opts = get_resource_display_options(mon_ops);
out->message(out, "cluster-summary", data_set,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(show, mon_show_stack),
pcmk_is_set(show, mon_show_dc),
pcmk_is_set(show, mon_show_times),
pcmk_is_set(show, mon_show_counts),
pcmk_is_set(show, mon_show_options));
unames = build_uname_list(data_set, only_node);
resources = build_rsc_list(data_set, only_rsc);
/*** NODE LIST ***/
if (pcmk_is_set(show, mon_show_nodes) && unames) {
out->message(out, "node-list", data_set->nodes, unames,
resources, print_opts,
pcmk_is_set(mon_ops, mon_op_print_clone_detail),
pcmk_is_set(mon_ops, mon_op_print_brief),
pcmk_is_set(mon_ops, mon_op_group_by_node));
}
/* Print resources section, if needed */
if (pcmk_is_set(show, mon_show_resources)) {
out->message(out, "resource-list", data_set, print_opts,
pcmk_is_set(mon_ops, mon_op_group_by_node),
pcmk_is_set(mon_ops, mon_op_inactive_resources),
pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames,
resources, FALSE);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(show, mon_show_attributes)) {
print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (pcmk_is_set(show, mon_show_operations)
|| pcmk_is_set(show, mon_show_failcounts)) {
print_node_summary(out, data_set,
pcmk_is_set(show, mon_show_operations),
mon_ops, unames, resources, FALSE);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(show, mon_show_failures)
&& xml_has_children(data_set->failed)) {
print_failed_actions(out, data_set, unames, resources, FALSE);
}
/* Print failed stonith actions */
if (pcmk_is_set(show, mon_show_fence_failed)
&& pcmk_is_set(mon_ops, mon_op_fence_history)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "failed-fencing-history", stonith_history, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE);
}
}
/* Print stonith history */
if (pcmk_is_set(mon_ops, mon_op_fence_history)) {
if (pcmk_is_set(show, mon_show_fence_worked)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "fencing-history", hp, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history),
FALSE);
}
} else if (pcmk_is_set(show, mon_show_fence_pending)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
if (hp) {
out->message(out, "pending-fencing-actions", hp, unames,
pcmk_is_set(mon_ops, mon_op_fence_full_history),
FALSE);
}
}
}
/* Print tickets if requested */
if (pcmk_is_set(show, mon_show_tickets)) {
print_cluster_tickets(out, data_set, FALSE);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(show, mon_show_bans)) {
print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
}
g_list_free_full(unames, free);
g_list_free_full(resources, free);
return 0;
}