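The hunks below update crm_mon's expected XML regression output: each <cluster_options> element in the <summary> section now additionally reports the stop-all-resources cluster option. As a minimal spot-check sketch (assuming a running cluster and an xmllint binary; neither command is part of this diff), the new attribute could be read back with:

  # hypothetical spot-check, not part of the regression suite:
  # print the stop-all-resources attribute from crm_mon's XML summary
  crm_mon -1 --output-as=xml | xmllint --xpath 'string(//summary/cluster_options/@stop-all-resources)' -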
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 5c000fcd63..f5a6047799 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3118 +1,3118 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --exclude=nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
* ping (ocf::pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* 1 (ocf::pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf::heartbeat:IPaddr): Active cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
* mysql-proxy (lsb:mysql-proxy): Started
* Node cluster02: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* dummy (ocf::pacemaker:Dummy): Started
* Public-IP (ocf::heartbeat:IPaddr): Started
* Email (lsb:exim): Started
* mysql-proxy (lsb:mysql-proxy): Started
* GuestNode httpd-bundle-0@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-1@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::heartbeat:IPaddr): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:ping): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml --group-by-node">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</node>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</node>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as xml --include=all --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --include=all --node=even-nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</clone>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --node=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes/>
<resources>
<clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=Fencing">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=exim-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=Email">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2" managed="true" disabled="false">
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping-clone">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources/>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="2">
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf::pacemaker:remote): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[1]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[2]
* httpd (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-clone-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"/>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by clone name in cloned group
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by group name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-group:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-proxy">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by primitive name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
<resources_configured number="27" disabled="4" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:1" number_resources="1" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf::pacemaker:Dummy): Started cluster02
* dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="13" disabled="1" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
</resource>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
<group id="partially-active-group" number_resources="2" managed="true" disabled="false">
<resource id="dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="pingd" value="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="httpd-bundle-0">
<resource_history id="httpd" orphan="false" migration-threshold="1000000">
<operation_history call="1" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf::pacemaker:Dummy): Started cluster02
* dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (ocf::pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf::heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="13" disabled="1" blocked="0"/>
- <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="pingd" value="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources, filtered by node
=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 27 resource instances configured (4 DISABLED)
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping] (unmanaged):
* ping (ocf::pacemaker:ping): Started cluster02 (unmanaged)
* ping (ocf::pacemaker:ping): Started cluster01 (unmanaged)
* Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
* dummy (ocf::pacemaker:Dummy): Started cluster02 (unmanaged)
* Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (unmanaged) (disabled):
* inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
* inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
* Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped (unmanaged)
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped (unmanaged)
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped (unmanaged)
* Resource Group: exim-group (unmanaged):
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02 (unmanaged)
* Email (lsb:exim): Started cluster02 (unmanaged)
* Clone Set: mysql-clone-group [mysql-group] (unmanaged):
* Resource Group: mysql-group:0 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
* Resource Group: mysql-group:1 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
=#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
* Passed: crm_mon - Text output of all resources with maintenance-mode enabled
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index fe4356b493..9094034c1c 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,1906 +1,1923 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/iso8601_internal.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
static char *
failed_action_string(xmlNodePtr xml_op) {
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
time_t last_change = 0;
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change) == pcmk_ok) {
crm_time_t *crm_when = crm_time_new(NULL);
char *time_s = NULL;
char *buf = NULL;
crm_time_set_timet(crm_when, &last_change);
time_s = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
buf = crm_strdup_printf("%s on %s '%s' (%d): call=%s, status='%s', "
"exitreason='%s', " XML_RSC_OP_LAST_CHANGE
"='%s', queued=%sms, exec=%sms",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none",
time_s,
crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
crm_time_free(crm_when);
free(time_s);
return buf;
} else {
return crm_strdup_printf("%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none");
}
}
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
data_set->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
static char *
last_changed_string(const char *last_written, const char *user,
const char *client, const char *origin) {
if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
return crm_strdup_printf("%s%s%s%s%s%s%s",
last_written ? last_written : "",
user ? " by " : "",
user ? user : "",
client ? " via " : "",
client ? client : "",
origin ? " on " : "",
origin ? origin : "");
} else {
return strdup("");
}
}
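
The last_changed_string() helper above relies on a common C idiom: every optional field contributes two "%s" slots, one for its connective (" by ", " via ", " on ") and one for its value, so absent fields collapse to empty strings and the format call needs no branching. A minimal standalone sketch of the same idiom, using POSIX asprintf() in place of crm_strdup_printf(); describe_change() and the sample values are illustrative, not Pacemaker API:

    #define _GNU_SOURCE /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for last_changed_string(): each optional
     * field occupies two "%s" slots, so absent fields collapse to "". */
    static char *
    describe_change(const char *when, const char *user, const char *origin)
    {
        char *buf = NULL;

        if (asprintf(&buf, "%s%s%s%s%s",
                     when ? when : "",
                     user ? " by " : "", user ? user : "",
                     origin ? " on " : "", origin ? origin : "") < 0) {
            return NULL;
        }
        return buf;
    }

    int
    main(void)
    {
        char *s = describe_change("Mon Jan  1 00:00:00 2020", "hacluster", NULL);

        puts(s ? s : "");   /* -> "Mon Jan  1 00:00:00 2020 by hacluster" */
        free(s);
        return 0;
    }
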
static char *
op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
int rc, gboolean print_timing) {
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
char *interval_str = NULL;
char *buf = NULL;
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *pair = pcmk_format_nvpair("interval", interval_ms_s, "ms");
interval_str = crm_strdup_printf(" %s", pair);
free(pair);
}
if (print_timing) {
char *last_change_str = NULL;
char *last_run_str = NULL;
char *exec_str = NULL;
char *queue_str = NULL;
const char *value = NULL;
time_t epoch = 0;
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_CHANGE, epoch);
last_change_str = crm_strdup_printf(" %s", time);
free(time);
}
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_RUN, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_RUN, epoch);
last_run_str = crm_strdup_printf(" %s", time);
free(time);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
exec_str = crm_strdup_printf(" %s", pair);
free(pair);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
queue_str = crm_strdup_printf(" %s", pair);
free(pair);
}
buf = crm_strdup_printf("(%s) %s:%s%s%s%s%s rc=%d (%s)", call, task,
interval_str ? interval_str : "",
last_change_str ? last_change_str : "",
last_run_str ? last_run_str : "",
exec_str ? exec_str : "",
queue_str ? queue_str : "",
rc, services_ocf_exitcode_str(rc));
if (last_change_str) {
free(last_change_str);
}
if (last_run_str) {
free(last_run_str);
}
if (exec_str) {
free(exec_str);
}
if (queue_str) {
free(queue_str);
}
} else {
buf = crm_strdup_printf("(%s) %s%s%s", call, task,
interval_str ? ":" : "",
interval_str ? interval_str : "");
}
if (interval_str) {
free(interval_str);
}
return buf;
}
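
op_history_string() builds each timing field as an independent " name=value" fragment and concatenates whichever fragments exist in one final printf. A self-contained sketch of that optional-fragment pattern, using a fixed buffer and snprintf() rather than the libcrmcommon helpers; append_ms() and the sample values are illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* Append one " name=valuems" chunk when the value is known;
     * unknown fields append nothing, mirroring op_history_string(). */
    static void
    append_ms(char *buf, size_t len, const char *name, const char *value)
    {
        if (value != NULL) {
            size_t used = strlen(buf);

            snprintf(buf + used, len - used, " %s=%sms", name, value);
        }
    }

    int
    main(void)
    {
        char line[128] = "(3) monitor:";

        append_ms(line, sizeof(line), "interval", "60000");
        append_ms(line, sizeof(line), "exec-time", NULL); /* skipped */
        puts(line); /* -> "(3) monitor: interval=60000ms" */
        return 0;
    }
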
static char *
resource_history_string(pe_resource_t *rsc, const char *rsc_id, gboolean all,
int failcount, time_t last_failure) {
char *buf = NULL;
if (rsc == NULL) {
buf = crm_strdup_printf("%s: orphan", rsc_id);
} else if (all || failcount || last_failure > 0) {
char *failcount_s = NULL;
char *lastfail_s = NULL;
if (failcount > 0) {
failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
failcount);
} else {
failcount_s = strdup("");
}
if (last_failure > 0) {
lastfail_s = crm_strdup_printf(" %s='%s'",
PCMK__LAST_FAILURE_PREFIX,
pcmk__epoch2str(&last_failure));
}
buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
rsc_id, rsc->migration_threshold, failcount_s,
lastfail_s? lastfail_s : "");
free(failcount_s);
free(lastfail_s);
} else {
buf = crm_strdup_printf("%s:", rsc_id);
}
return buf;
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
/* Always print the DC section when there is no DC, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- out->message(out, "maint-mode");
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
return rc;
}
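
The hunk above inverts the caller/handler contract for the "maint-mode" message: rather than the summary testing pe_flag_maintenance_mode itself, it passes the flag word to the handler and promotes its own return code only when the handler reports pcmk_rc_ok, i.e. only when something was actually printed. A minimal sketch of that contract, with illustrative return codes standing in for pcmk_rc_ok and pcmk_rc_no_output:

    #include <stdio.h>

    /* Illustrative codes mirroring pcmk_rc_ok / pcmk_rc_no_output;
     * the real values live in Pacemaker's headers. */
    enum { RC_OK = 0, RC_NO_OUTPUT = 1 };

    /* The handler decides from the flags whether anything is printed. */
    static int
    maint_mode_message(unsigned long long flags)
    {
        if (flags == 0) {
            return RC_NO_OUTPUT;
        }
        printf("*** Resource management is DISABLED ***\n");
        return RC_OK;
    }

    int
    main(void)
    {
        int rc = RC_NO_OUTPUT;

        /* Caller-side pattern from the hunk above: promote rc only
         * when the message handler actually produced output. */
        if (maint_mode_message(0x1ULL) == RC_OK) {
            rc = RC_OK;
        }
        return rc;
    }
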
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
/* Always print the DC section when there is no DC, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
/* Kind of a hack - close the list we may have opened earlier in this
* function so we can put all the options into their own list. We
* only want to do this on HTML output, though.
*/
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- out->message(out, "maint-mode");
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
return rc;
}
char *
pe__node_display_name(pe_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
const char *node_id = NULL;
int name_len;
CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
/* Host is displayed only if this is a guest node */
if (pe__is_guest_node(node)) {
pe_node_t *host_node = pe__current_node(node->details->remote_rsc);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
}
if (node_host == NULL) {
node_host = ""; /* so we at least get "uname@" to indicate guest */
}
}
/* Node ID is displayed if different from uname and detail is requested */
if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
node_id = node->details->id;
}
/* Determine name length */
name_len = strlen(node->details->uname) + 1;
if (node_host) {
name_len += strlen(node_host) + 1; /* "@node_host" */
}
if (node_id) {
name_len += strlen(node_id) + 3; /* + " (node_id)" */
}
/* Allocate and populate display name */
node_name = malloc(name_len);
CRM_ASSERT(node_name != NULL);
strcpy(node_name, node->details->uname);
if (node_host) {
strcat(node_name, "@");
strcat(node_name, node_host);
}
if (node_id) {
strcat(node_name, " (");
strcat(node_name, node_id);
strcat(node_name, ")");
}
return node_name;
}
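
pe__node_display_name() sizes its buffer exactly before the strcpy()/strcat() sequence: one extra byte covers the NUL or the '@', and three extra bytes cover " (" plus ")". A standalone rendition of the same arithmetic; display_name() and the sample values are illustrative, not Pacemaker API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *
    display_name(const char *uname, const char *host, const char *id)
    {
        size_t len = strlen(uname) + 1;            /* uname + NUL */
        char *name;

        if (host != NULL) {
            len += strlen(host) + 1;               /* "@host" */
        }
        if (id != NULL) {
            len += strlen(id) + 3;                 /* " (id)" */
        }
        name = malloc(len);
        if (name == NULL) {
            return NULL;
        }
        strcpy(name, uname);
        if (host != NULL) {
            strcat(name, "@");
            strcat(name, host);
        }
        if (id != NULL) {
            strcat(name, " (");
            strcat(name, id);
            strcat(name, ")");
        }
        return name;
    }

    int
    main(void)
    {
        char *s = display_name("httpd-bundle-0", "cluster02", NULL);

        puts(s ? s : "");  /* -> "httpd-bundle-0@cluster02" */
        free(s);
        return 0;
    }
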
int
pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...)
{
xmlNodePtr xml_node = NULL;
va_list args;
CRM_ASSERT(tag_name != NULL);
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
xml_node = is_list
? create_xml_node(xml_node, tag_name)
: xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
va_start(args, pairs_count);
while(pairs_count--) {
const char *param_name = va_arg(args, const char *);
const char *param_value = va_arg(args, const char *);
if (param_name && param_value) {
xmlSetProp(xml_node, (pcmkXmlStr)param_name, (pcmkXmlStr)param_value);
}
};
va_end(args);
if (is_list) {
pcmk__output_xml_push_parent(out, xml_node);
}
return pcmk_rc_ok;
}
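
The variadic tail of pe__name_and_nvpairs_xml() carries pairs_count name/value string pairs, consumed two va_arg() calls at a time, and a NULL in either position skips the pair. A self-contained sketch of that calling convention; emit_pairs() is hypothetical and does no XML escaping:

    #include <stdarg.h>
    #include <stdio.h>

    static void
    emit_pairs(const char *tag, size_t pairs_count, ...)
    {
        va_list args;

        printf("<%s", tag);
        va_start(args, pairs_count);
        while (pairs_count-- > 0) {
            const char *name = va_arg(args, const char *);
            const char *value = va_arg(args, const char *);

            if (name != NULL && value != NULL) {
                printf(" %s=\"%s\"", name, value);
            }
        }
        va_end(args);
        printf("/>\n");
    }

    int
    main(void)
    {
        emit_pairs("node", 3,
                   "name", "cluster01",
                   "online", "true",
                   "standby", NULL);   /* skipped */
        return 0;
    }
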
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_html(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
free(node_name);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_text(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "ban");
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
char *weight_s = crm_itoa(pe_node->weight);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) location->id);
xmlSetProp(node, (pcmkXmlStr) "resource", (pcmkXmlStr) location->rsc_lh->id);
xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) pe_node->details->uname);
xmlSetProp(node, (pcmkXmlStr) "weight", (pcmkXmlStr) weight_s);
xmlSetProp(node, (pcmkXmlStr) "master_only",
(pcmkXmlStr) pcmk__btoa(location->role_filter == RSC_ROLE_MASTER));
free(weight_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_html(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *nnodes_str = crm_strdup_printf("%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
free(nnodes_str);
if (ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
s = crm_strdup_printf(", %d ", nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else if (ndisabled && !nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
} else if (!ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else {
char *s = crm_strdup_printf("%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_text(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
out->list_item(out, NULL, "%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
if (ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED, %d BLOCKED from "
"further action due to failure)",
nresources, pcmk__plural_s(nresources), ndisabled,
nblocked);
} else if (ndisabled && !nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED)",
nresources, pcmk__plural_s(nresources), ndisabled);
} else if (!ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d BLOCKED from further action "
"due to failure)",
nresources, pcmk__plural_s(nresources), nblocked);
} else {
out->list_item(out, NULL, "%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
}
return pcmk_rc_ok;
}
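
The text renderer above branches four ways so the parenthetical appears only when something is disabled or blocked, which is exactly the shape seen in the fixtures ("27 resource instances configured (4 DISABLED)"). A condensed standalone sketch, with a local plural helper standing in for pcmk__plural_s():

    #include <stdio.h>

    /* Local stand-in for pcmk__plural_s(): "s" unless the count is 1. */
    static const char *
    plural_s(int n)
    {
        return (n == 1) ? "" : "s";
    }

    static void
    print_counts(int nresources, int ndisabled, int nblocked)
    {
        printf("%d resource instance%s configured",
               nresources, plural_s(nresources));
        if (ndisabled && nblocked) {
            printf(" (%d DISABLED, %d BLOCKED)", ndisabled, nblocked);
        } else if (ndisabled) {
            printf(" (%d DISABLED)", ndisabled);
        } else if (nblocked) {
            printf(" (%d BLOCKED)", nblocked);
        }
        printf("\n");
    }

    int
    main(void)
    {
        print_counts(27, 4, 0); /* -> "27 resource instances configured (4 DISABLED)" */
        return 0;
    }
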
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *s = crm_itoa(nnodes);
xmlSetProp(nodes_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nresources);
xmlSetProp(resources_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(ndisabled);
xmlSetProp(resources_node, (pcmkXmlStr) "disabled", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nblocked);
xmlSetProp(resources_node, (pcmkXmlStr) "blocked", (pcmkXmlStr) s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
if (dc) {
if (crm_is_true(quorum)) {
char *buf = crm_strdup_printf("%s (version %s) - partition with quorum",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
} else {
char *buf = crm_strdup_printf("%s (version %s) - partition",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
pcmk_create_html_node(node, "span", NULL, "warning", "WITHOUT");
pcmk_create_html_node(node, "span", NULL, NULL, "quorum");
}
} else {
pcmk_create_html_node(node ,"span", NULL, "warning", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_text(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
if (dc) {
out->list_item(out, "Current DC", "%s (version %s) - partition %s quorum",
dc_name, dc_version_s ? dc_version_s : "unknown",
crm_is_true(quorum) ? "with" : "WITHOUT");
} else {
out->list_item(out, "Current DC", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "current_dc");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
if (dc) {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "true");
xmlSetProp(node, (pcmkXmlStr) "version", (pcmkXmlStr) (dc_version_s ? dc_version_s : ""));
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) dc->details->uname);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) dc->details->id);
xmlSetProp(node, (pcmkXmlStr) "with_quorum",
(pcmkXmlStr) pcmk__btoa(crm_is_true(quorum)));
} else {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "false");
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("maint-mode")
+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long")
int
pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
- fprintf(out->dest, "\n *** Resource management is DISABLED ***");
- fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services");
- fprintf(out->dest, "\n");
- return pcmk_rc_ok;
+ unsigned long long flags = va_arg(args, unsigned long long);
+
+ if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ fprintf(out->dest, "\n *** Resource management is DISABLED ***");
+ fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services");
+ fprintf(out->dest, "\n");
+ return pcmk_rc_ok;
+ } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ fprintf(out->dest, "\n *** Resource management is DISABLED ***");
+ fprintf(out->dest, "\n The cluster will keep all resources stopped");
+ fprintf(out->dest, "\n");
+ return pcmk_rc_ok;
+ } else {
+ return pcmk_rc_no_output;
+ }
}
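
The rewritten handler distinguishes two ways resource management can be off: maintenance mode (the cluster will not start, stop or recover services) and stop-all-resources (the cluster actively keeps everything stopped). It prints at most one banner and reports pcmk_rc_no_output otherwise. A compilable sketch of the same flag gating, with illustrative bit values standing in for pe_flag_maintenance_mode and pe_flag_stop_everything:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag bits; the real values are Pacemaker's. */
    #define FLAG_MAINTENANCE  (1ULL << 0)
    #define FLAG_STOP_ALL     (1ULL << 1)

    static bool
    is_set(unsigned long long flags, unsigned long long bit)
    {
        return (flags & bit) != 0;
    }

    /* Maintenance mode wins; the stop-everything banner differs only in
     * its second line; with neither flag set, nothing is printed. */
    static int
    print_banner(unsigned long long flags)
    {
        if (is_set(flags, FLAG_MAINTENANCE)) {
            puts(" *** Resource management is DISABLED ***");
            puts(" The cluster will not attempt to start, stop or recover services");
            return 0;
        } else if (is_set(flags, FLAG_STOP_ALL)) {
            puts(" *** Resource management is DISABLED ***");
            puts(" The cluster will keep all resources stopped");
            return 0;
        }
        return 1; /* no output */
    }

    int
    main(void)
    {
        return print_banner(FLAG_STOP_ALL) == 0 ? 0 : 1;
    }
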
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
+
+ pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
+ pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
+ pcmk_create_html_node(node, "span", NULL, NULL,
+ " (the cluster will keep all resources stopped)");
} else {
out->list_item(out, NULL, "Resource management: enabled");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_log(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
return pcmk_rc_ok;
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
+ return pcmk_rc_ok;
} else {
return pcmk_rc_no_output;
}
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_text(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "cluster_options");
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
xmlSetProp(node, (pcmkXmlStr) "stonith-enabled",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)));
xmlSetProp(node, (pcmkXmlStr) "symmetric-cluster",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)));
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "freeze");
break;
case no_quorum_stop:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop");
break;
case no_quorum_demote:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote");
break;
case no_quorum_ignore:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore");
break;
case no_quorum_suicide:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "suicide");
break;
}
xmlSetProp(node, (pcmkXmlStr) "maintenance-mode",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+ xmlSetProp(node, (pcmkXmlStr) "stop-all-resources",
+ (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
return pcmk_rc_ok;
}
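
Each scheduler flag surfaces as a boolean attribute on <cluster_options>; the added hunk makes pe_flag_stop_everything visible as stop-all-resources, the attribute the regression fixtures earlier in this diff now expect. A short sketch of the flag-to-attribute mapping, where btoa() stands in for pcmk__btoa() and the bit values are illustrative:

    #include <stdio.h>

    static const char *
    btoa(int b)
    {
        return b ? "true" : "false";
    }

    int
    main(void)
    {
        unsigned long long flags = 0; /* neither maintenance nor stop-all */

        printf("<cluster_options maintenance-mode=\"%s\" stop-all-resources=\"%s\"/>\n",
               btoa((flags & 0x1ULL) != 0),
               btoa((flags & 0x2ULL) != 0));
        return 0;
    }
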
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
const char *stack_s = va_arg(args, const char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_text(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
out->list_item(out, "Stack", "%s", stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "stack");
const char *stack_s = va_arg(args, const char *);
xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_html(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
pcmk_create_html_node(updated_node, "span", NULL, NULL,
pcmk__epoch2str(NULL));
pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
pcmk_create_html_node(changed_node, "span", NULL, NULL, buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "last_update");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "last_change");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
xmlSetProp(updated_node, (pcmkXmlStr) "time",
(pcmkXmlStr) pcmk__epoch2str(NULL));
xmlSetProp(changed_node, (pcmkXmlStr) "time", (pcmkXmlStr) (last_written ? last_written : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "user", (pcmkXmlStr) (user ? user : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "client", (pcmkXmlStr) (client ? client : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "origin", (pcmkXmlStr) (origin ? origin : ""));
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_text(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
out->list_item(out, "Last updated", "%s", pcmk__epoch2str(NULL));
out->list_item(out, "Last change", " %s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_text(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
char *s = failed_action_string(xml_op);
out->list_item(out, NULL, "%s", s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
char *rc_s = crm_itoa(rc);
char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
xmlNodePtr node = pcmk__output_create_xml_node(out, "failure");
xmlSetProp(node, (pcmkXmlStr) (op_key ? "op_key" : "id"),
(pcmkXmlStr) (op_key ? op_key : "id"));
xmlSetProp(node, (pcmkXmlStr) "node",
(pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME));
xmlSetProp(node, (pcmkXmlStr) "exitstatus",
(pcmkXmlStr) services_ocf_exitcode_str(rc));
xmlSetProp(node, (pcmkXmlStr) "exitreason", (pcmkXmlStr) reason_s);
xmlSetProp(node, (pcmkXmlStr) "exitcode", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "status",
(pcmkXmlStr) services_lrm_status_str(status));
if (last) {
guint interval_ms = 0;
char *s = NULL;
time_t when = crm_parse_int(last, "0");
crm_time_t *crm_when = crm_time_new(NULL);
char *rc_change = NULL;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
s = crm_itoa(interval_ms);
crm_time_set_timet(crm_when, &when);
rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, (pcmkXmlStr) rc_change);
xmlSetProp(node, (pcmkXmlStr) "queued",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_QUEUE));
xmlSetProp(node, (pcmkXmlStr) "exec",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
xmlSetProp(node, (pcmkXmlStr) "task",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_TASK));
free(s);
free(rc_change);
crm_time_free(crm_when);
}
free(reason_s);
free(rc_s);
return pcmk_rc_ok;
}
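
When a last-rc-change epoch is present, the XML renderer converts it to display form with crm_time_set_timet()/crm_time_as_string() before attaching it alongside the queued/exec timings. A standalone sketch of the equivalent conversion using plain POSIX localtime_r()/strftime(); the epoch value is illustrative:

    #define _POSIX_C_SOURCE 200809L /* for localtime_r() */
    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        time_t when = 1590000000;  /* illustrative last-rc-change epoch */
        struct tm tm;
        char buf[64];

        localtime_r(&when, &tm);
        strftime(buf, sizeof(buf), "%a %b %d %H:%M:%S %Y %z", &tm);
        printf("last-rc-change='%s'\n", buf);
        return 0;
    }
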
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_html(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = crm_strdup_printf("Node: %s", node_name);
if (full) {
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, buf);
if (node->details->standby_onfail && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "standby", " standby (on-fail)");
} else if (node->details->standby && node->details->online) {
char *s = crm_strdup_printf(" standby%s", node->details->running_rsc ? " (with active resources)" : "");
pcmk_create_html_node(item_node, "span", NULL, " standby", s);
free(s);
} else if (node->details->standby) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (standby)");
} else if (node->details->maintenance && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "maint", " maintenance");
} else if (node->details->maintenance) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (maintenance)");
} else if (node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "online", " online");
} else {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE");
}
if (print_brief && group_by_node) {
GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
if (rscs != NULL) {
out->begin_list(out, NULL, NULL, NULL);
pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
out->end_list(out);
}
} else if (group_by_node) {
GListPtr lpc2 = NULL;
out->begin_list(out, NULL, NULL, NULL);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
out->end_list(out);
}
} else {
out->begin_list(out, NULL, NULL, "%s", buf);
}
free(buf);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_text(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = NULL;
/* Print the node name and status */
if (pe__is_guest_node(node)) {
buf = crm_strdup_printf("GuestNode %s: %s", node_name, node_mode);
} else if (pe__is_remote_node(node)) {
buf = crm_strdup_printf("RemoteNode %s: %s", node_name, node_mode);
} else {
buf = crm_strdup_printf("Node %s: %s", node_name, node_mode);
}
/* If we're grouping by node, print its resources */
if (group_by_node) {
if (print_brief) {
GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
if (rscs != NULL) {
out->begin_list(out, NULL, NULL, "%s", buf);
out->begin_list(out, NULL, NULL, "Resources");
pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
out->end_list(out);
out->end_list(out);
}
} else {
GListPtr gIter2 = NULL;
out->begin_list(out, NULL, NULL, "%s", buf);
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
out->end_list(out);
out->end_list(out);
}
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
free(node_name);
} else {
out->begin_list(out, NULL, NULL, "Node: %s", pe__node_display_name(node, print_clone_detail));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_xml(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean print_brief G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
const char *node_type = "unknown";
char *length_s = crm_itoa(g_list_length(node->details->running_rsc));
switch (node->details->type) {
case node_member:
node_type = "member";
break;
case node_remote:
node_type = "remote";
break;
case node_ping:
node_type = "ping";
break;
}
pe__name_and_nvpairs_xml(out, true, "node", 13,
"name", node->details->uname,
"id", node->details->id,
"online", pcmk__btoa(node->details->online),
"standby", pcmk__btoa(node->details->standby),
"standby_onfail", pcmk__btoa(node->details->standby_onfail),
"maintenance", pcmk__btoa(node->details->maintenance),
"pending", pcmk__btoa(node->details->pending),
"unclean", pcmk__btoa(node->details->unclean),
"shutdown", pcmk__btoa(node->details->shutdown),
"expected_up", pcmk__btoa(node->details->expected_up),
"is_dc", pcmk__btoa(node->details->is_dc),
"resources_running", length_s,
"type", node_type);
if (pe__is_guest_node(node)) {
xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
xmlSetProp(xml_node, (pcmkXmlStr) "id_as_resource",
(pcmkXmlStr) node->details->remote_rsc->container->id);
}
if (group_by_node) {
GListPtr lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
}
free(length_s);
out->end_list(out);
} else {
xmlNodePtr parent = pcmk__output_xml_create_parent(out, "node");
xmlSetProp(parent, (pcmkXmlStr) "name", (pcmkXmlStr) node->details->uname);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_text(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
if (add_extra) {
int v = crm_parse_int(value, "0");
if (v <= 0) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
} else if (v < expected_score) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
return pcmk_rc_ok;
}
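
For connectivity attributes such as pingd, the text handler classifies the parsed score: non-positive means connectivity is lost, anything below the expected score means it is degraded, and otherwise the bare value is printed. A self-contained sketch of that classification; classify() is hypothetical, and the handler above parses with crm_parse_int():

    #include <stdio.h>
    #include <stdlib.h>

    static const char *
    classify(const char *value, int expected_score)
    {
        int v = atoi(value);

        if (v <= 0) {
            return "Connectivity is lost";
        } else if (v < expected_score) {
            return "Connectivity is degraded";
        }
        return "ok";
    }

    int
    main(void)
    {
        printf("pingd=1000: %s\n", classify("1000", 1000)); /* -> ok */
        printf("pingd=500 : %s\n", classify("500", 1000));  /* -> degraded */
        return 0;
    }
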
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_html(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
if (add_extra) {
int v = crm_parse_int(value, "0");
char *s = crm_strdup_printf("%s: %s", name, value);
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, s);
free(s);
if (v <= 0) {
pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
} else if (v < expected_score) {
char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
free(buf);
}
} else {
out->list_item(out, NULL, "%s: %s", name, value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_xml(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute");
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) name);
xmlSetProp(node, (pcmkXmlStr) "value", (pcmkXmlStr) value);
if (add_extra) {
char *buf = crm_itoa(expected_score);
xmlSetProp(node, (pcmkXmlStr) "expected", (pcmkXmlStr) buf);
free(buf);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_html(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_text(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
/* space-separated lists of node names */
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_guest_nodes = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
const char *node_mode = NULL;
char *node_name = pe__node_display_name(node, print_clone_detail);
if (!pcmk__str_in_list(only_node, node->details->uname)) {
free(node_name);
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
/* Get node mode */
if (node->details->unclean) {
if (node->details->online) {
node_mode = "UNCLEAN (online)";
} else if (node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if (node->details->pending) {
node_mode = "pending";
} else if (node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
if (node->details->running_rsc) {
node_mode = "standby (with active resources)";
} else {
node_mode = "standby";
}
} else {
node_mode = "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
node_mode = "maintenance";
} else {
node_mode = "OFFLINE (maintenance)";
}
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
if (pe__is_guest_node(node)) {
online_guest_nodes = pcmk__add_word(online_guest_nodes,
node_name);
} else if (pe__is_remote_node(node)) {
online_remote_nodes = pcmk__add_word(online_remote_nodes,
node_name);
} else {
online_nodes = pcmk__add_word(online_nodes, node_name);
}
free(node_name);
continue;
}
} else {
node_mode = "OFFLINE";
if (group_by_node == FALSE) {
if (pe__is_remote_node(node)) {
offline_remote_nodes = pcmk__add_word(offline_remote_nodes,
node_name);
} else if (pe__is_guest_node(node)) {
/* ignore offline guest nodes */
} else {
offline_nodes = pcmk__add_word(offline_nodes, node_name);
}
free(node_name);
continue;
}
}
/* If we get here, node is in bad state, or we're grouping by node */
out->message(out, "node", node, print_opts, TRUE, node_mode, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
free(node_name);
}
/* If we're not grouping by node, summarize nodes by status */
if (online_nodes) {
out->list_item(out, "Online", "[%s ]", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
out->list_item(out, "OFFLINE", "[%s ]", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
out->list_item(out, "RemoteOnline", "[%s ]", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
out->list_item(out, "RemoteOFFLINE", "[%s ]", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_guest_nodes) {
out->list_item(out, "GuestOnline", "[%s ]", online_guest_nodes);
free(online_guest_nodes);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
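/* When group_by_node is FALSE, the text handler above collapses healthy nodes
 * into one summary line per class instead of printing each node separately.
 * Illustrative rendering for two online cluster nodes (hypothetical names):
 *
 *   * Online: [ node1 node2 ]
 *
 * The space after the opening bracket comes from pcmk__add_word(), which
 * inserts a separator before each word it appends.
 */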
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_xml(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
out->begin_list(out, NULL, NULL, "nodes");
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_text(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *buf = op_history_string(xml_op, task, interval_ms_s, rc, print_timing);
out->list_item(out, NULL, "%s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_xml(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *rc_s = NULL;
xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history");
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "task", (pcmkXmlStr) task);
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *s = crm_strdup_printf("%sms", interval_ms_s);
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
free(s);
}
if (print_timing) {
const char *value = NULL;
value = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_LAST_RUN);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_RUN,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, (pcmkXmlStr) s);
free(s);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_QUEUE, (pcmkXmlStr) s);
free(s);
}
}
rc_s = crm_itoa(rc);
xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "rc_text", (pcmkXmlStr) services_ocf_exitcode_str(rc));
free(rc_s);
return pcmk_rc_ok;
}
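/* Illustrative element produced by the XML handler above (attribute values
 * are hypothetical; "interval" appears only for nonzero intervals, and the
 * timing attributes only when print_timing is set):
 *
 *   <operation_history call="2" task="monitor" interval="10000ms"
 *                      rc="0" rc_text="ok"/>
 */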
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_text(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t); /* declared as "time_t" above */
gboolean as_header = va_arg(args, gboolean);
char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
if (as_header) {
out->begin_list(out, NULL, NULL, "%s", buf);
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t); /* declared as "time_t" above */
gboolean as_header = va_arg(args, gboolean);
xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc_id);
if (rsc == NULL) {
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "true");
} else if (all || failcount || last_failure > 0) {
char *migration_s = crm_itoa(rsc->migration_threshold);
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "false");
xmlSetProp(node, (pcmkXmlStr) "migration-threshold",
(pcmkXmlStr) migration_s);
free(migration_s);
if (failcount > 0) {
char *s = crm_itoa(failcount);
xmlSetProp(node, (pcmkXmlStr) PCMK__FAIL_COUNT_PREFIX,
(pcmkXmlStr) s);
free(s);
}
if (last_failure > 0) {
xmlSetProp(node, (pcmkXmlStr) PCMK__LAST_FAILURE_PREFIX,
(pcmkXmlStr) pcmk__epoch2str(&last_failure));
}
}
if (as_header == FALSE) {
pcmk__output_xml_pop_parent(out);
}
return pcmk_rc_ok;
}
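/* Nesting note: the XML handler above creates "resource_history" as a parent
 * element. With as_header TRUE the parent is left open so that subsequent
 * op-history messages nest inside it; with as_header FALSE it is popped
 * immediately and stays childless.
 */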
PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr", "gboolean")
int
pe__resource_list(pcmk__output_t *out, va_list args)
{
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean group_by_node = va_arg(args, gboolean);
gboolean inactive_resources = va_arg(args, gboolean);
gboolean brief_output = va_arg(args, gboolean);
gboolean print_summary = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
gboolean print_spacer = va_arg(args, gboolean);
GListPtr rsc_iter;
int rc = pcmk_rc_no_output;
/* If we already showed active resources by node, and
* we're not showing inactive resources, we have nothing to do
*/
if (group_by_node && !inactive_resources) {
return rc;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
if (group_by_node) {
/* Active resources have already been printed by node */
out->begin_list(out, NULL, NULL, "Inactive Resources");
} else if (inactive_resources) {
out->begin_list(out, NULL, NULL, "Full List of Resources");
} else {
out->begin_list(out, NULL, NULL, "Active Resources");
}
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
if (brief_output && !group_by_node) {
GListPtr rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
pe__rscs_brief_output(out, rscs, print_opts, inactive_resources);
g_list_free(rscs);
}
/* For each resource, display it if appropriate */
for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
} else if (group_by_node) {
if (is_active) {
continue;
}
/* Skip primitives already counted in a brief summary */
} else if (brief_output && (rsc->variant == pe_native)) {
continue;
/* Skip resources that aren't at least partially active,
* unless we're displaying inactive resources
*/
} else if (!partially_active && !inactive_resources) {
continue;
} else if (partially_active && !pe__rsc_running_on_any_node_in_list(rsc, only_node)) {
continue;
}
/* Print this resource */
x = out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc,
only_node, only_rsc);
if (x == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
if (print_summary && rc != pcmk_rc_ok) {
if (group_by_node) {
out->list_item(out, NULL, "No inactive resources");
} else if (inactive_resources) {
out->list_item(out, NULL, "No resources");
} else {
out->list_item(out, NULL, "No active resources");
}
}
out->end_list(out);
return rc;
}
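/* The out->message() call above dispatches on crm_map_element_name(rsc->xml),
 * i.e. the resource's XML tag, so each variant reaches its registered
 * handler: "primitive", "group", "clone", or "bundle" (see fmt_functions
 * below).
 */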
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_html(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, NULL, "%s:\t%s%s %s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_text(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, ticket->id, "\t%s%s %s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, ticket->id, "\t%s%s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = NULL;
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
node = pcmk__output_create_xml_node(out, "ticket");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) ticket->id);
xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) (ticket->granted ? "granted" : "revoked"));
xmlSetProp(node, (pcmkXmlStr) "standby",
(pcmkXmlStr) pcmk__btoa(ticket->standby));
if (ticket->last_granted > -1) {
xmlSetProp(node, (pcmkXmlStr) "last-granted",
(pcmkXmlStr) pcmk__epoch2str(&ticket->last_granted));
}
return pcmk_rc_ok;
}
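/* Each row below maps a (message name, format name) pair to a handler.
 * Rows whose format is "default" act as a fallback for formats without a
 * specific entry, which is why e.g. "resource-list" needs only one row.
 * Read this as a summary of how pcmk__register_messages() consumes the
 * table, not as a formal specification.
 */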
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "html", pe__ban_html },
{ "ban", "log", pe__ban_text },
{ "ban", "text", pe__ban_text },
{ "ban", "xml", pe__ban_xml },
{ "bundle", "xml", pe__bundle_xml },
{ "bundle", "html", pe__bundle_html },
{ "bundle", "text", pe__bundle_text },
{ "bundle", "log", pe__bundle_text },
{ "clone", "xml", pe__clone_xml },
{ "clone", "html", pe__clone_html },
{ "clone", "text", pe__clone_text },
{ "clone", "log", pe__clone_text },
{ "cluster-counts", "html", pe__cluster_counts_html },
{ "cluster-counts", "log", pe__cluster_counts_text },
{ "cluster-counts", "text", pe__cluster_counts_text },
{ "cluster-counts", "xml", pe__cluster_counts_xml },
{ "cluster-dc", "html", pe__cluster_dc_html },
{ "cluster-dc", "log", pe__cluster_dc_text },
{ "cluster-dc", "text", pe__cluster_dc_text },
{ "cluster-dc", "xml", pe__cluster_dc_xml },
{ "cluster-options", "html", pe__cluster_options_html },
{ "cluster-options", "log", pe__cluster_options_log },
{ "cluster-options", "text", pe__cluster_options_text },
{ "cluster-options", "xml", pe__cluster_options_xml },
{ "cluster-summary", "default", pe__cluster_summary },
{ "cluster-summary", "html", pe__cluster_summary_html },
{ "cluster-stack", "html", pe__cluster_stack_html },
{ "cluster-stack", "log", pe__cluster_stack_text },
{ "cluster-stack", "text", pe__cluster_stack_text },
{ "cluster-stack", "xml", pe__cluster_stack_xml },
{ "cluster-times", "html", pe__cluster_times_html },
{ "cluster-times", "log", pe__cluster_times_text },
{ "cluster-times", "text", pe__cluster_times_text },
{ "cluster-times", "xml", pe__cluster_times_xml },
{ "failed-action", "html", pe__failed_action_text },
{ "failed-action", "log", pe__failed_action_text },
{ "failed-action", "text", pe__failed_action_text },
{ "failed-action", "xml", pe__failed_action_xml },
{ "group", "xml", pe__group_xml },
{ "group", "html", pe__group_html },
{ "group", "text", pe__group_text },
{ "group", "log", pe__group_text },
- /* maint-mode only exists for text and log. Other formatters output it as
- * part of the cluster-options handler.
- */
- { "maint-mode", "log", pe__cluster_maint_mode_text },
{ "maint-mode", "text", pe__cluster_maint_mode_text },
{ "node", "html", pe__node_html },
{ "node", "log", pe__node_text },
{ "node", "text", pe__node_text },
{ "node", "xml", pe__node_xml },
{ "node-list", "html", pe__node_list_html },
{ "node-list", "log", pe__node_list_text },
{ "node-list", "text", pe__node_list_text },
{ "node-list", "xml", pe__node_list_xml },
{ "node-attribute", "html", pe__node_attribute_html },
{ "node-attribute", "log", pe__node_attribute_text },
{ "node-attribute", "text", pe__node_attribute_text },
{ "node-attribute", "xml", pe__node_attribute_xml },
{ "op-history", "html", pe__op_history_text },
{ "op-history", "log", pe__op_history_text },
{ "op-history", "text", pe__op_history_text },
{ "op-history", "xml", pe__op_history_xml },
{ "primitive", "xml", pe__resource_xml },
{ "primitive", "html", pe__resource_html },
{ "primitive", "text", pe__resource_text },
{ "primitive", "log", pe__resource_text },
{ "resource-history", "default", pe__resource_history_text },
{ "resource-history", "xml", pe__resource_history_xml },
{ "resource-list", "default", pe__resource_list },
{ "ticket", "html", pe__ticket_html },
{ "ticket", "log", pe__ticket_text },
{ "ticket", "text", pe__ticket_text },
{ "ticket", "xml", pe__ticket_xml },
{ NULL, NULL, NULL }
};
void
pe__register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
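/* Typical setup, sketched with hypothetical variables: create a formatter,
 * register the message tables, then emit by name:
 *
 *   pcmk__output_t *out = ...;      // e.g. obtained via pcmk__output_new()
 *   pe__register_messages(out);
 *   out->message(out, "ticket", ticket);  // dispatched per out->fmt_name
 */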
void
pe__output_node(pe_node_t *node, gboolean details, pcmk__output_t *out)
{
if (node == NULL) {
crm_trace("<NULL>");
return;
}
CRM_ASSERT(node->details);
crm_trace("%sNode %s: (weight=%d, fixed=%s)",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
GListPtr all = NULL;
all = g_list_prepend(all, strdup("*"));
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
out->message(out, crm_map_element_name(rsc->xml),
pe_print_pending, rsc, all, all);
}
g_list_free_full(all, free);
}
}
diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c
index ef0b0c7bb1..2c092dfe0d 100644
--- a/tools/crm_mon_curses.c
+++ b/tools/crm_mon_curses.c
@@ -1,432 +1,446 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <crm/crm.h>
#include <crm/common/curses_internal.h>
#include <crm/common/output_internal.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/pengine/internal.h>
#include <glib.h>
#include "crm_mon.h"
#if CURSES_ENABLED
GOptionEntry crm_mon_curses_output_entries[] = {
{ NULL }
};
typedef struct curses_list_data_s {
unsigned int len;
char *singular_noun;
char *plural_noun;
} curses_list_data_t;
typedef struct private_data_s {
GQueue *parent_q;
} private_data_t;
static void
curses_free_priv(pcmk__output_t *out) {
private_data_t *priv = out->priv;
if (priv == NULL) {
return;
}
g_queue_free(priv->parent_q);
free(priv);
out->priv = NULL;
}
static bool
curses_init(pcmk__output_t *out) {
private_data_t *priv = NULL;
/* If curses_init was previously called on this output struct, just return. */
if (out->priv != NULL) {
return true;
} else {
out->priv = calloc(1, sizeof(private_data_t));
if (out->priv == NULL) {
return false;
}
priv = out->priv;
}
priv->parent_q = g_queue_new();
initscr();
cbreak();
noecho();
return true;
}
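/* Lifecycle note: curses_init() performs the initscr()/cbreak()/noecho()
 * setup, and curses_finish() below restores the terminal with the mirror
 * echo()/nocbreak()/endwin() calls, so the two must always be paired.
 */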
static void
curses_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
echo();
nocbreak();
endwin();
}
static void
curses_reset(pcmk__output_t *out) {
CRM_ASSERT(out != NULL);
curses_free_priv(out);
curses_init(out);
}
static void
curses_subprocess_output(pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr) {
if (proc_stdout != NULL) {
printw("%s\n", proc_stdout);
}
if (proc_stderr != NULL) {
printw("%s\n", proc_stderr);
}
clrtoeol();
refresh();
}
/* curses_version is defined in curses.h, so we can't use that name here.
* Note that this function prints out via text, not with curses.
*/
static void
curses_ver(pcmk__output_t *out, bool extended) {
if (extended) {
printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
} else {
printf("Pacemaker %s\n", PACEMAKER_VERSION);
printf("Written by Andrew Beekhof\n");
}
}
G_GNUC_PRINTF(2, 3)
static void
curses_error(pcmk__output_t *out, const char *format, ...) {
va_list ap;
/* Informational output does not get indented, to separate it from other
* potentially indented list output.
*/
va_start(ap, format);
vw_printw(stdscr, format, ap);
va_end(ap);
/* Add a newline. */
addch('\n');
clrtoeol();
refresh();
sleep(2);
}
G_GNUC_PRINTF(2, 3)
static void
curses_info(pcmk__output_t *out, const char *format, ...) {
va_list ap;
/* Informational output does not get indented, to separate it from other
* potentially indented list output.
*/
va_start(ap, format);
vw_printw(stdscr, format, ap);
va_end(ap);
/* Add a newline. */
addch('\n');
clrtoeol();
refresh();
}
static void
curses_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
private_data_t *priv = out->priv;
CRM_ASSERT(priv != NULL);
curses_indented_printf(out, "%s", buf);
}
G_GNUC_PRINTF(4, 5)
static void
curses_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
const char *format, ...) {
private_data_t *priv = out->priv;
curses_list_data_t *new_list = NULL;
va_list ap;
CRM_ASSERT(priv != NULL);
va_start(ap, format);
curses_indented_vprintf(out, format, ap);
printw(":\n");
va_end(ap);
new_list = calloc(1, sizeof(curses_list_data_t));
new_list->len = 0;
new_list->singular_noun = singular_noun == NULL ? NULL : strdup(singular_noun);
new_list->plural_noun = plural_noun == NULL ? NULL : strdup(plural_noun);
g_queue_push_tail(priv->parent_q, new_list);
}
G_GNUC_PRINTF(3, 4)
static void
curses_list_item(pcmk__output_t *out, const char *id, const char *format, ...) {
private_data_t *priv = out->priv;
va_list ap;
CRM_ASSERT(priv != NULL);
va_start(ap, format);
if (id != NULL) {
curses_indented_printf(out, "%s: ", id);
vw_printw(stdscr, format, ap);
} else {
curses_indented_vprintf(out, format, ap);
}
addch('\n');
va_end(ap);
out->increment_list(out);
}
static void
curses_increment_list(pcmk__output_t *out) {
private_data_t *priv = out->priv;
gpointer tail;
CRM_ASSERT(priv != NULL);
tail = g_queue_peek_tail(priv->parent_q);
CRM_ASSERT(tail != NULL);
((curses_list_data_t *) tail)->len++;
}
static void
curses_end_list(pcmk__output_t *out) {
private_data_t *priv = out->priv;
curses_list_data_t *node = NULL;
CRM_ASSERT(priv != NULL);
node = g_queue_pop_tail(priv->parent_q);
if (node->singular_noun != NULL && node->plural_noun != NULL) {
if (node->len == 1) {
curses_indented_printf(out, "%d %s found\n", node->len, node->singular_noun);
} else {
curses_indented_printf(out, "%d %s found\n", node->len, node->plural_noun);
}
}
free(node);
}
static bool
curses_is_quiet(pcmk__output_t *out) {
return out->quiet;
}
pcmk__output_t *
crm_mon_mk_curses_output(char **argv) {
pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
if (retval == NULL) {
return NULL;
}
retval->fmt_name = "console";
retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
retval->init = curses_init;
retval->free_priv = curses_free_priv;
retval->finish = curses_finish;
retval->reset = curses_reset;
retval->register_message = pcmk__register_message;
retval->message = pcmk__call_message;
retval->subprocess_output = curses_subprocess_output;
retval->version = curses_ver;
retval->err = curses_error;
retval->info = curses_info;
retval->output_xml = curses_output_xml;
retval->begin_list = curses_begin_list;
retval->list_item = curses_list_item;
retval->increment_list = curses_increment_list;
retval->end_list = curses_end_list;
retval->is_quiet = curses_is_quiet;
return retval;
}
G_GNUC_PRINTF(2, 0)
void
curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
int level = 0;
private_data_t *priv = out->priv;
CRM_ASSERT(priv != NULL);
level = g_queue_get_length(priv->parent_q);
for (int i = 0; i < level; i++) {
printw(" ");
}
if (level > 0) {
printw("* ");
}
vw_printw(stdscr, format, args);
clrtoeol();
refresh();
}
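/* Indentation is a fixed run of spaces per level of list nesting (the depth
 * of priv->parent_q), plus a "* " bullet below the top level. Illustrative
 * rendering one level deep (hypothetical text):
 *
 *   * Started: [ node1 node2 ]
 */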
G_GNUC_PRINTF(2, 3)
void
curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
va_list ap;
va_start(ap, format);
curses_indented_vprintf(out, format, ap);
va_end(ap);
}
PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
static int
stonith_event_console(pcmk__output_t *out, va_list args) {
stonith_history_t *event = va_arg(args, stonith_history_t *);
gboolean full_history = va_arg(args, gboolean);
gboolean later_succeeded = va_arg(args, gboolean);
crm_time_t *crm_when = crm_time_new(NULL);
char *buf = NULL;
crm_time_set_timet(crm_when, &(event->completed));
buf = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
switch (event->state) {
case st_failed:
curses_indented_printf(out, "%s of %s failed: delegate=%s, client=%s, origin=%s, %s='%s'%s\n",
stonith_action_str(event->action), event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
full_history ? "completed" : "last-failed", buf,
later_succeeded ? " (a later attempt succeeded)" : "");
break;
case st_done:
curses_indented_printf(out, "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'\n",
stonith_action_str(event->action), event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
full_history ? "completed" : "last-successful", buf);
break;
default:
curses_indented_printf(out, "%s of %s pending: client=%s, origin=%s\n",
stonith_action_str(event->action), event->target,
event->client, event->origin);
break;
}
free(buf);
crm_time_free(crm_when);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("maint-mode")
+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long")
static int
cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
- printw("\n *** Resource management is DISABLED ***");
- printw("\n The cluster will not attempt to start, stop or recover services");
- printw("\n");
+ unsigned long long flags = va_arg(args, unsigned long long);
+ int rc;
+
+ if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ printw("\n *** Resource management is DISABLED ***");
+ printw("\n The cluster will not attempt to start, stop or recover services");
+ printw("\n");
+ rc = pcmk_rc_ok;
+ } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ printw("\n *** Resource management is DISABLED ***");
+ printw("\n The cluster will keep all resources stopped");
+ printw("\n");
+ rc = pcmk_rc_ok;
+ } else {
+ rc = pcmk_rc_no_output;
+ }
+
clrtoeol();
refresh();
- return pcmk_rc_ok;
+ return rc;
}
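/* Per the hunk above, the console maint-mode handler now receives the
 * working set's flags and prints one of two banners: maintenance mode
 * (pe_flag_maintenance_mode) or stop-all-resources (pe_flag_stop_everything).
 * When neither flag is set it returns pcmk_rc_no_output to signal that
 * nothing was printed.
 */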
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "console", pe__ban_text },
{ "bundle", "console", pe__bundle_text },
{ "clone", "console", pe__clone_text },
{ "cluster-counts", "console", pe__cluster_counts_text },
{ "cluster-dc", "console", pe__cluster_dc_text },
{ "cluster-options", "console", pe__cluster_options_text },
{ "cluster-stack", "console", pe__cluster_stack_text },
{ "cluster-summary", "console", pe__cluster_summary },
{ "cluster-times", "console", pe__cluster_times_text },
{ "failed-action", "console", pe__failed_action_text },
{ "failed-fencing-history", "console", stonith__failed_history },
{ "fencing-history", "console", stonith__history },
{ "full-fencing-history", "console", stonith__full_history },
{ "group", "console", pe__group_text },
{ "maint-mode", "console", cluster_maint_mode_console },
{ "node", "console", pe__node_text },
{ "node-attribute", "console", pe__node_attribute_text },
{ "node-list", "console", pe__node_list_text },
{ "op-history", "console", pe__op_history_text },
{ "pending-fencing-actions", "console", stonith__pending_actions },
{ "primitive", "console", pe__resource_text },
{ "resource-history", "console", pe__resource_history_text },
{ "stonith-event", "console", stonith_event_console },
{ "ticket", "console", pe__ticket_text },
{ NULL, NULL, NULL }
};
void
crm_mon_register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
#else
pcmk__output_t *
crm_mon_mk_curses_output(char **argv) {
/* curses was disabled in the build, so fall back to text. */
return pcmk__mk_text_output(argv);
}
G_GNUC_PRINTF(2, 0)
void
curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
return;
}
G_GNUC_PRINTF(2, 3)
void
curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
return;
}
void
crm_mon_register_messages(pcmk__output_t *out) {
return;
}
#endif
