diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index 6ef665f2ca..ffb2f80d8e 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,180 +1,197 @@
<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May 5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="cluster01">
<instance_attributes id="nodes-1">
<nvpair id="nodes-1-location" name="location" value="office"/>
</instance_attributes>
</node>
<node id="2" uname="cluster02"/>
</nodes>
<resources>
<clone id="ping-clone">
<primitive class="ocf" id="ping" provider="pacemaker" type="ping">
<instance_attributes id="ping-instance_attributes">
<nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
<nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
<nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
</instance_attributes>
<operations>
<op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
<op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
<op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
</clone>
<primitive class="stonith" id="Fencing" type="fence_xvm">
<instance_attributes id="Fencing-instance_attributes">
<nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
</instance_attributes>
<operations>
<op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
</instance_attributes>
<operations>
<op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
<op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
<op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
<op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
<op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
<op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
<primitive class="ocf" id="inactive-dummy" provider="pacemaker" type="Dummy">
<meta_attributes id="inactive-dummy.meta">
<nvpair id="inactive-dummy.meta.target" name="target-role" value="stopped"/>
</meta_attributes>
</primitive>
<master id="inactive-clone-master">
<primitive class="ocf" id="inactive-clone" provider="pacemaker" type="Stateful">
<operations>
<op id="inactive-clone-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
<op id="inactive-clone-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
</operations>
</primitive>
</master>
<bundle id="httpd-bundle">
<docker image="pcmk:http" replicas="3"/>
<network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
<port-mapping id="httpd-port" port="80"/>
</network>
<storage>
<storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
<storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
<storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
</storage>
<primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
<meta_attributes id="bundle-meta_attributes">
<nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</bundle>
<group id="exim-group">
<primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
<instance_attributes id="params-public-ip">
<nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
</instance_attributes>
</primitive>
<primitive id="Email" class="lsb" type="exim"/>
</group>
+ <clone id="mysql-clone-group">
+ <group id="mysql-group">
+ <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
+ <operations>
+ <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
</resources>
<constraints>
<rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
</constraints>
<tags>
<tag id="all-nodes">
<obj_ref id="1"/>
<obj_ref id="2"/>
</tag>
<tag id="even-nodes">
<obj_ref id="2"/>
</tag>
<tag id="odd-nodes">
<obj_ref id="1"/>
</tag>
<tag id="inactive-rscs">
<obj_ref id="inactive-dummy"/>
<obj_ref id="inactive-clone"/>
</tag>
<tag id="fencing-rscs">
<obj_ref id="Fencing"/>
</tag>
</tags>
<op_defaults>
<meta_attributes id="op_defaults-options">
<nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
</meta_attributes>
</op_defaults>
</configuration>
<status>
<node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1588951263" last-run="1588951263" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951265" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1588951263" last-run="1588951263" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1588951278" last-run="1588951278" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
<lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951284" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1591717057" last-run="1591717057" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1591717057" last-run="1591717057" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
+ <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
+ <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2">
<nvpair id="status-2-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1588951274" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
<lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;13:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1588951276" exec-time="24" queue-time="0" op-digest="f85d77708ad4ea02a9099e1e548aff0d"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1588951272" last-run="1588951272" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1591717057" last-run="1591717057" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.3.0" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1591717057" last-run="1591717057" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
+ <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
+ <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1596126852" last-run="1596126852" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.4.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1596126852" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1">
<nvpair id="status-1-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
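The fixture above can be fed to crm_mon directly, without a live cluster, by pointing the CIB_file environment variable at it. A minimal sketch of how to reproduce the views exercised by the expected output below; the long options match the request strings recorded in the XML results, and -1 is the usual one-shot spelling:

    export CIB_file=cts/cli/crm_mon.xml
    crm_mon -1                                  # basic text output
    crm_mon --output-as=xml                     # XML output
    crm_mon --output-as=xml --exclude=nodes     # XML output without the node section
    crm_mon -1 --output-as=xml --group-by-node  # XML output grouped by node
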
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 8d4779e4f6..b36ffa25f5 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,2326 +1,3030 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
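The jump from 21 to 26 configured instances follows from clone-max defaulting to the number of configured nodes: with 5 nodes configured, the anonymous clone of mysql-group contributes five mysql-proxy instances (21 + 5 = 26). A quick check against the fixture, assuming xmllint is available:

    CIB_file=cts/cli/crm_mon.xml crm_mon --output-as=xml \
        | xmllint --xpath 'string(//resources_configured/@number)' -
    # prints: 26
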
=#=#=#= Begin test: XML output =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
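In the XML view (and in the detailed text output further down), each instance of the cloned group is reported with a numeric suffix, mysql-group:0 through mysql-group:4; two instances are Started (one per online cluster node) and the remaining three are Stopped. The instance ids can be pulled out of the same XML, again assuming xmllint:

    CIB_file=cts/cli/crm_mon.xml crm_mon --output-as=xml \
        | xmllint --xpath '//clone[@id="mysql-clone-group"]/group/@id' -
    # id="mysql-group:0" id="mysql-group:1" ... id="mysql-group:4"
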
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --exclude=nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
* ping (ocf::pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
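The detail variant annotates nodes with their corosync ids (e.g. cluster02 (2)) and expands the clone into per-instance Resource Groups instead of the condensed Started: [ ... ] list. A sketch of a comparable invocation, assuming the usual crm_mon short options (-r to include inactive resources, -R for detail); the exact flags the regression driver passes are not shown in this excerpt:

    CIB_file=cts/cli/crm_mon.xml crm_mon -1 -r -R   # one-shot, inactive resources, detail
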
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* 1 (ocf::pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf::heartbeat:IPaddr): Active cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
+ * mysql-proxy (lsb:mysql-proxy): Started
* Node cluster02: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* dummy (ocf::pacemaker:Dummy): Started
* Public-IP (ocf::heartbeat:IPaddr): Started
* Email (lsb:exim): Started
+ * mysql-proxy (lsb:mysql-proxy): Started
* GuestNode httpd-bundle-0@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-1@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
+ * 1 (lsb:mysql-proxy): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
+ * 1 (lsb:mysql-proxy): Active
* 1 (ocf::heartbeat:IPaddr): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:ping): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml --group-by-node">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member">
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
</node>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member">
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
</node>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as xml --include=all --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="16" task="stop" exec-time="6048ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --include=all --node=even-nodes">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</clone>
<resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
</resources>
<node_attributes>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="dummy" orphan="false" migration-threshold="1000000">
<operation_history call="18" task="start" exec-time="6020ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="6015ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
</node>
</node_history>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --node=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes/>
<resources>
<resource id="inactive-dummy" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<clone id="inactive-clone-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-clone" resource_agent="ocf::pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<bans>
<ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/>
</bans>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
* Clone Set: inactive-clone-master [inactive-clone] (promotable):
* Stopped: [ cluster01 cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
* Clone Set: inactive-clone-master [inactive-clone] (promotable):
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=Fencing">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="19" task="monitor" interval="60000ms" exec-time="24ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=exim-group">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2">
<resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=Email">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<group id="exim-group" number_resources="2">
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="Email" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping-clone">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
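The invocation this pair of tests exercises is recorded verbatim in the request attribute above; to reproduce it against a live cluster (where counts and node names will differ from this fixture):

    crm_mon --output-as=xml --resource=ping-clone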
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]

Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000

Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
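Filtering by the primitive name of an anonymous clone matches every active instance, which is why both the cluster01 and cluster02 copies appear above. Per the request attribute:

    crm_mon --output-as=xml --resource=ping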
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 (1) cluster02 (2) ]

Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02

Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000

Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=ping:1">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
- <node_history/>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="ping" orphan="false" migration-threshold="1000000">
+ <operation_history call="11" task="start" exec-time="2044ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="12" task="monitor" interval="10000ms" exec-time="2031ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="ping" orphan="false" migration-threshold="1000000">
+ <operation_history call="17" task="start" exec-time="2038ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="18" task="monitor" interval="10000ms" exec-time="2034ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
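The exact-instance syntax narrows the resources section to the single copy on cluster01; this hunk also updates the expectation so that node_history is populated for both nodes rather than left empty. Per the request attribute:

    crm_mon --output-as=xml --resource=ping:1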
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=blah">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources/>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
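A filter that matches no resource still exits 0 with an empty <resources/> element rather than reporting an error, as the status element above shows:

    crm_mon --output-as=xml --resource=blah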
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
* Clone Set: inactive-clone-master [inactive-clone] (promotable):
* Stopped: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
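Filtering by the bundle id expands every replica together with its members (IP address, bundled primitive, container, and remote connection):

    crm_mon --output-as=xml --resource=httpd-bundle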
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="2">
<resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
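Naming a single container narrows the output to the one replica that owns it:

    crm_mon --output-as=xml --resource=httpd-bundle-docker-2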
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf::pacemaker:remote): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
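The same holds for a replica's remote-connection resource:

    crm_mon --output-as=xml --resource=httpd-bundle-0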
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)

Node List:
* Online: [ cluster01 cluster02 ]

Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[1]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[2]
* httpd (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=httpd">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="5"/>
- <resources_configured number="21" disabled="1" blocked="0"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
- <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
- <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="1">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
<replica id="2">
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="location" value="office"/>
<attribute name="pingd" value="1000" expected="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000" expected="1000"/>
</node>
</node_attributes>
<node_history/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
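Because the bundled primitive keeps the same id in every replica, filtering by that name matches all three copies:

    crm_mon --output-as=xml --resource=httpd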
+=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
+=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-clone-group">
+ <summary>
+ <stack type="corosync"/>
+ <current_dc present="true" version="" with_quorum="true"/>
+ <last_update time=""/>
+ <last_change time=""/>
+ <nodes_configured number="5"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ </summary>
+ <nodes>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+ <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+ <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+ <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+ </nodes>
+ <resources>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false"/>
+ </resources>
+ <node_attributes>
+ <node name="cluster01">
+ <attribute name="location" value="office"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ <node name="cluster02">
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ </node_attributes>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by clone name in cloned group
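Note the asymmetry these new expectations capture: for the same filter the text formatter prints every group instance, while the XML formatter emits the clone element empty. Per the request attribute:

    crm_mon --output-as=xml --resource=mysql-clone-group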
+=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by group name in cloned group
+=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-group">
+ <summary>
+ <stack type="corosync"/>
+ <current_dc present="true" version="" with_quorum="true"/>
+ <last_update time=""/>
+ <last_change time=""/>
+ <nodes_configured number="5"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ </summary>
+ <nodes>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+ <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+ <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+ <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+ </nodes>
+ <resources>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
+ </resources>
+ <node_attributes>
+ <node name="cluster01">
+ <attribute name="location" value="office"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ <node name="cluster02">
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ </node_attributes>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by group name in cloned group
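Filtering by the group's own name also matches all five instances, and here the XML formatter does expand the member groups:

    crm_mon --output-as=xml --resource=mysql-group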
+=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
+=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-group:1">
+ <summary>
+ <stack type="corosync"/>
+ <current_dc present="true" version="" with_quorum="true"/>
+ <last_update time=""/>
+ <last_change time=""/>
+ <nodes_configured number="5"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ </summary>
+ <nodes>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+ <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+ <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+ <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+ </nodes>
+ <resources>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ </clone>
+ </resources>
+ <node_attributes>
+ <node name="cluster01">
+ <attribute name="location" value="office"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ <node name="cluster02">
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ </node_attributes>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
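As with clone instances, the name:N syntax selects a single group instance:

    crm_mon --output-as=xml --resource=mysql-group:1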
+=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
+=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-proxy">
+ <summary>
+ <stack type="corosync"/>
+ <current_dc present="true" version="" with_quorum="true"/>
+ <last_update time=""/>
+ <last_change time=""/>
+ <nodes_configured number="5"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ </summary>
+ <nodes>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+ <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+ <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+ <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+ </nodes>
+ <resources>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:0" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster02" id="2" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ <group id="mysql-group:2" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:3" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ <group id="mysql-group:4" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
+ </group>
+ </clone>
+ </resources>
+ <node_attributes>
+ <node name="cluster01">
+ <attribute name="location" value="office"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ <node name="cluster02">
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ </node_attributes>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by primitive name in cloned group
+=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
+=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
+<pacemaker-result api-version="2.2" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
+ <summary>
+ <stack type="corosync"/>
+ <current_dc present="true" version="" with_quorum="true"/>
+ <last_update time=""/>
+ <last_change time=""/>
+ <nodes_configured number="5"/>
+ <resources_configured number="26" disabled="1" blocked="0"/>
+ <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
+ </summary>
+ <nodes>
+ <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/>
+ <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
+ <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/>
+ <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
+ <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
+ </nodes>
+ <resources>
+ <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
+ <group id="mysql-group:1" number_resources="1">
+ <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
+ <node name="cluster01" id="1" cached="true"/>
+ </resource>
+ </group>
+ </clone>
+ </resources>
+ <node_attributes>
+ <node name="cluster01">
+ <attribute name="location" value="office"/>
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ <node name="cluster02">
+ <attribute name="pingd" value="1000" expected="1000"/>
+ </node>
+ </node_attributes>
+ <node_history>
+ <node name="cluster02">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ <node name="cluster01">
+ <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
+ <operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ <operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
+ </resource_history>
+ </node>
+ </node_history>
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="11" disabled="0" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="3" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
</resource>
<resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="pingd" value="1000"/>
</node>
<node name="cluster02">
<attribute name="pingd" value="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster02">
<resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
<node name="httpd-bundle-0">
<resource_history id="httpd" orphan="false" migration-threshold="1000000">
<operation_history call="1" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (ocf::pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf::heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: XML output of partially active resources, filtered by node =#=#=#=
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as=xml --node=cluster01">
<summary>
<stack type="corosync"/>
<current_dc present="true" version="" with_quorum="true"/>
<last_update time=""/>
<last_change time=""/>
<nodes_configured number="4"/>
<resources_configured number="11" disabled="0" blocked="0"/>
<cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false"/>
</summary>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false">
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
</bundle>
</resources>
<node_attributes>
<node name="cluster01">
<attribute name="pingd" value="1000"/>
</node>
</node_attributes>
<node_history>
<node name="cluster01">
<resource_history id="Fencing" orphan="false" migration-threshold="1000000">
<operation_history call="15" task="start" exec-time="36ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="20" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="ping" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="10000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="60000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
<resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
<operation_history call="2" task="start" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
<operation_history call="3" task="monitor" interval="30000ms" exec-time="0ms" queue-time="0ms" rc="0" rc_text="ok"/>
</resource_history>
</node>
</node_history>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: XML output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources, filtered by node
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 3d655c8f54..bec06c8a96 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,1628 +1,1668 @@
#!@BASH_PATH@
#
# Copyright 2008-2020 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
#
# Note on portable usage of sed: GNU, POSIX, and *BSD sed share only a
# limited subset of compatible functionality. Do not use the -i option,
# alternation (\|), \0, or character sequences such as \n or \s.
#
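# For example (illustrative only; nothing below relies on it), a portable
# in-place edit spells out the temp-file dance instead of using -i:
#   sed -e 's/old/new/' "$f" > "$f.tmp" && mv "$f.tmp" "$f"
#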
USAGE_TEXT="Usage: cts-cli [<options>]
Options:
--help Display this text, then exit
-V, --verbose Display any differences from expected output
-t 'TEST [...]' Run only specified tests (default: 'dates tools crm_mon acls validity upgrade rules')
-p DIR Look for executables in DIR (may be specified multiple times)
-v, --valgrind Run all commands under valgrind
-s Save actual output as expected output"
# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
test_home="$(dirname "$(readlink -e "$0")")"
else
test_home="$(dirname "$0")"
fi
: ${shadow=cts-cli}
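# The ":" builtin is a no-op, so the expansion above merely assigns the
# default "cts-cli" to $shadow when it is unset, leaving any value the
# caller exported intact.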
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
num_errors=0
num_passed=0
verbose=0
tests="dates tools crm_mon acls validity upgrade rules"
do_save=0
VALGRIND_CMD=
VALGRIND_OPTS="
-q
--gen-suppressions=all
--show-reachable=no
--leak-check=full
--trace-children=no
--time-stamp=yes
--num-callers=20
--suppressions=$test_home/valgrind-pcmk.suppressions
"
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_USAGE=64
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111
function test_assert() {
target=$1; shift
cib=$1; shift
app=`echo "$cmd" | sed 's/\ .*//'`
printf "* Running: $app - $desc\n" 1>&2
printf "=#=#=#= Begin test: $desc =#=#=#=\n"
eval $VALGRIND_CMD $cmd 2>&1
rc=$?
if [ x$cib != x0 ]; then
printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
CIB_user=root cibadmin -Q
fi
printf "=#=#=#= End test: $desc - $(crm_error --exit $rc) ($rc) =#=#=#=\n"
if [ $rc -ne $target ]; then
num_errors=$(( $num_errors + 1 ))
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
return
else
printf "* Passed: %-14s - %s\n" $app "$desc"
num_passed=$(( $num_passed + 1 ))
fi
}
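# Every test below follows the same pattern (illustrative values):
#   desc="What the test checks"
#   cmd="some_tool --some-flag"
#   test_assert $CRM_EX_OK 0
# where the first argument is the expected crm_exit_t value, and a second
# argument of 0 suppresses the cibadmin dump of the resulting CIB that
# otherwise follows the test.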
function test_crm_mon() {
export CIB_file="$test_home/cli/crm_mon.xml"
desc="Basic text output"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output"
cmd="crm_mon --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Basic text output without node section"
cmd="crm_mon -1 --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="XML output without the node section"
cmd="crm_mon --output-as=xml --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="Text output with only the node section"
cmd="crm_mon -1 --exclude=all --include=nodes"
test_assert $CRM_EX_OK 0
# The above test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
desc="Complete text output"
cmd="crm_mon -1 --include=all"
test_assert $CRM_EX_OK 0
# XML includes everything already so there's no need for a complete test
desc="Complete text output with detail"
cmd="crm_mon -1R --include=all"
test_assert $CRM_EX_OK 0
# XML includes detailed output already
desc="Complete brief text output"
cmd="crm_mon -1 --include=all --brief"
test_assert $CRM_EX_OK 0
desc="Complete text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="XML output grouped by node"
cmd="crm_mon -1 --output-as=xml --group-by-node"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by node"
cmd="crm_mon -1 --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node"
cmd="crm_mon --output-as xml --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by tag"
cmd="crm_mon -1 --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="XML output filtered by tag"
cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by resource tag"
cmd="crm_mon -1 --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource tag"
cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output filtered by node that doesn't exist"
cmd="crm_mon -1 --node=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node that doesn't exist"
cmd="crm_mon --output-as=xml --node=blah"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Basic text output with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster02"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete text output filtered by primitive resource"
cmd="crm_mon -1 --include=all --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="XML output filtered by primitive resource"
cmd="crm_mon --output-as=xml --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by group resource"
cmd="crm_mon -1 --include=all --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource"
cmd="crm_mon --output-as=xml --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by group resource member"
cmd="crm_mon -1 --include=all --resource=Public-IP"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource member"
cmd="crm_mon --output-as=xml --resource=Email"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by clone resource"
cmd="crm_mon -1 --include=all --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource"
cmd="crm_mon --output-as=xml --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by clone resource instance"
cmd="crm_mon -1 --include=all --resource=ping"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by exact clone resource instance"
cmd="crm_mon -1 --include=all --show-detail --resource=ping:0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by exact clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping:1"
test_assert $CRM_EX_OK 0
desc="Basic text output filtered by resource that doesn't exist"
cmd="crm_mon -1 --resource=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource that doesn't exist"
cmd="crm_mon --output-as=xml --resource=blah"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by tag"
cmd="crm_mon -1 -r --resource=inactive-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle resource"
cmd="crm_mon -1 -r --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="XML output filtered by inactive bundle resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled IP address resource"
cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled IP address resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled container"
cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled container"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle connection"
cmd="crm_mon -1 -r --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundle connection"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled primitive resource"
cmd="crm_mon -1 -r --resource=httpd"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled primitive resource"
cmd="crm_mon --output-as=xml --resource=httpd"
test_assert $CRM_EX_OK 0
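+ # The tests below cover filtering inside a cloned group: a match can be
+ # made on the clone name, the group name, the primitive name, or an
+ # exact instance name using the ":N" suffix (e.g. mysql-group:1).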
+ desc="Complete text output, filtered by clone name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by clone name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-clone-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by group name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by group name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by exact group instance name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by exact group instance name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-group:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by primitive name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by primitive name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-proxy"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by exact primitive instance name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by exact primitive instance name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-proxy:1"
+ test_assert $CRM_EX_OK 0
+
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-partial.xml"
desc="Text output of partially active resources"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output of partially active resources"
cmd="crm_mon -1 --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete brief text output, with inactive resources"
cmd="crm_mon -1 -r --include=all --brief"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node, with inactive resources"
cmd="crm_mon -1 -r --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, filtered by node"
cmd="crm_mon -1 --output-as=xml --node=cluster01"
test_assert $CRM_EX_OK 0
unset CIB_file
}
function test_tools() {
local TMPXML
local TMPORIG
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
desc="Validate CIB"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK
desc="Configure something before erasing"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Require --force for CIB erasure"
cmd="cibadmin -E"
test_assert $CRM_EX_UNSAFE
desc="Allow CIB erasure with --force"
cmd="cibadmin -E --force"
test_assert $CRM_EX_OK
desc="Query CIB"
cmd="cibadmin -Q > $TMPORIG"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Query new cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Query cluster options"
cmd="cibadmin -Q -o crm_config > $TMPXML"
test_assert $CRM_EX_OK
desc="Set no-quorum policy"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="Delete nvpair"
cmd="cibadmin -D -o crm_config --xml-text '<nvpair id=\"cib-bootstrap-options-cluster-delay\"/>'"
test_assert $CRM_EX_OK
desc="Create operation should fail"
cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_EXISTS
desc="Modify cluster options section"
cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Query updated cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Set duplicate cluster option"
cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
test_assert $CRM_EX_OK
desc="Setting multiply defined cluster option should fail"
cmd="crm_attribute -n cluster-delay -v 30s"
test_assert $CRM_EX_MULTIPLE
desc="Set cluster option with -s"
cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
test_assert $CRM_EX_OK
desc="Delete cluster option with -i"
cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Create node1 and bring it online"
cmd="crm_simulate --live-check --in-place --node-up=node1"
test_assert $CRM_EX_OK
desc="Create node attribute"
cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
test_assert $CRM_EX_OK
desc="Query new node attribute"
cmd="cibadmin -Q -o nodes | grep node1-ram"
test_assert $CRM_EX_OK
desc="Set a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query a fail count"
cmd="crm_failcount --query -r foo -N node1"
test_assert $CRM_EX_OK
desc="Delete a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -D -N node1 -t status"
test_assert $CRM_EX_OK
desc="Digest calculation"
cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
test_assert $CRM_EX_OK
# This update will fail because $TMPORIG's version numbers are now older than the live CIB's
desc="Replace operation should fail"
cmd="cibadmin -R --xml-file $TMPORIG"
test_assert $CRM_EX_OLD
desc="Default standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Set standby status"
cmd="crm_standby -N node1 -v true"
test_assert $CRM_EX_OK
desc="Query standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Delete standby value"
cmd="crm_standby -N node1 -D"
test_assert $CRM_EX_OK
desc="Create a resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g is-managed"
test_assert $CRM_EX_OK
desc="Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create another resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK 0
desc="Show why a resource is not running"
cmd="crm_resource -Y -r dummy"
test_assert $CRM_EX_OK 0
desc="Remove another resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK 0
desc="Create a resource attribute"
cmd="crm_resource -r dummy -p delay -v 10s"
test_assert $CRM_EX_OK
desc="List the configured resources"
cmd="crm_resource -L"
test_assert $CRM_EX_OK
desc="List IDs of instantiated resources"
cmd="crm_resource -l"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource"
cmd="crm_resource -q -r dummy"
test_assert $CRM_EX_OK 0
desc="Require a destination when migrating a resource that is stopped"
cmd="crm_resource -r dummy -M"
test_assert $CRM_EX_USAGE
desc="Don't support migration to non-existent locations"
cmd="crm_resource -r dummy -M -N i.do.not.exist"
test_assert $CRM_EX_NOSUCH
desc="Create a fencing resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"Fence\" class=\"stonith\" type=\"fence_true\"/>'"
test_assert $CRM_EX_OK
desc="Bring resources online"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Try to move a resource to its existing location"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_EXISTS
desc="Move a resource from its existing location"
cmd="crm_resource -r dummy --move"
test_assert $CRM_EX_OK
desc="Clear out constraints generated by --move"
cmd="crm_resource -r dummy --clear"
test_assert $CRM_EX_OK
desc="Default ticket granted state"
cmd="crm_ticket -t ticketA -G granted -d false"
test_assert $CRM_EX_OK
desc="Set ticket granted state"
cmd="crm_ticket -t ticketA -r --force"
test_assert $CRM_EX_OK
desc="Query ticket granted state"
cmd="crm_ticket -t ticketA -G granted"
test_assert $CRM_EX_OK
desc="Delete ticket granted state"
cmd="crm_ticket -t ticketA -D granted --force"
test_assert $CRM_EX_OK
desc="Make a ticket standby"
cmd="crm_ticket -t ticketA -s"
test_assert $CRM_EX_OK
desc="Query ticket standby state"
cmd="crm_ticket -t ticketA -G standby"
test_assert $CRM_EX_OK
desc="Activate a ticket"
cmd="crm_ticket -t ticketA -a"
test_assert $CRM_EX_OK
desc="Delete ticket standby state"
cmd="crm_ticket -t ticketA -D standby"
test_assert $CRM_EX_OK
desc="Ban a resource on unknown node"
cmd="crm_resource -r dummy -B -N host1"
test_assert $CRM_EX_NOSUCH
desc="Create two more nodes and bring them online"
cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
test_assert $CRM_EX_OK
desc="Ban dummy from node1"
cmd="crm_resource -r dummy -B -N node1"
test_assert $CRM_EX_OK
desc="Show where a resource is running"
cmd="crm_resource -r dummy -W"
test_assert $CRM_EX_OK 0
desc="Show constraints on a resource"
cmd="crm_resource -a -r dummy"
test_assert $CRM_EX_OK 0
desc="Ban dummy from node2"
cmd="crm_resource -r dummy -B -N node2"
test_assert $CRM_EX_OK
desc="Relocate resources due to ban"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Move dummy to node1"
cmd="crm_resource -r dummy -M -N node1"
test_assert $CRM_EX_OK
desc="Clear implicit constraints for dummy on node2"
cmd="crm_resource -r dummy -U -N node2"
test_assert $CRM_EX_OK
desc="Drop the status section"
cmd="cibadmin -R -o status --xml-text '<status/>'"
test_assert $CRM_EX_OK 0
desc="Create a clone"
cmd="cibadmin -C -o resources --xml-text '<clone id=\"test-clone\"><primitive id=\"test-primitive\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/></clone>'"
test_assert $CRM_EX_OK 0
desc="Create a resource meta attribute"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates (force clone)"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Update child resource meta attribute with duplicates"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute in parent"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update existing resource meta attribute"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the parent"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Copy resources"
cmd="cibadmin -Q -o resources > $TMPXML"
test_assert $CRM_EX_OK 0
desc="Delete resource parent meta attribute (force)"
cmd="crm_resource -r test-clone --meta -d is-managed --force"
test_assert $CRM_EX_OK
desc="Restore duplicates"
cmd="cibadmin -R -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Delete resource child meta attribute"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
cibadmin -C -o resources --xml-text '<group id="dummy-group"> \
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"\/> \
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"\/> \
</group>'
desc="Create a resource meta attribute in dummy1"
cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in dummy-group"
cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
test_assert $CRM_EX_OK
cibadmin -D -o resource --xml-text '<group id="dummy-group">'
desc="Specify a lifetime when moving a resource"
cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
test_assert $CRM_EX_OK
desc="Try to move a resource previously moved with a lifetime"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_OK
desc="Ban dummy from node1 for a short time"
cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
test_assert $CRM_EX_OK
desc="Remove expired constraints"
sleep 2
cmd="crm_resource --clear --expired"
test_assert $CRM_EX_OK
# Clear has already been tested elsewhere, but we need to get rid of the
# constraints so testing delete works. It won't delete if there's still
# a reference to the resource somewhere.
desc="Clear all implicit constraints for dummy"
cmd="crm_resource -r dummy -U"
test_assert $CRM_EX_OK
desc="Delete a resource"
cmd="crm_resource -D -r dummy -t primitive"
test_assert $CRM_EX_OK
unset CIB_shadow
unset CIB_shadow_dir
rm -f "$TMPXML" "$TMPORIG"
desc="Create an XML patchset"
cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
test_assert $CRM_EX_ERROR 0
}
INVALID_PERIODS=(
"2019-01-01 00:00:00Z" # Start with no end
"2019-01-01 00:00:00Z/" # Start with only a trailing slash
"PT2S/P1M" # Two durations
"2019-13-01 00:00:00Z/P1M" # Out-of-range month
"20191077T15/P1M" # Out-of-range day
"2019-10-01T25:00:00Z/P1M" # Out-of-range hour
"2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z" # Out-of-range minute
"2019-10-01 00:00:80Z/P1M" # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour
"20191001T000010 -00:61/P1M" # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P" # Duration with no values
"P1Z/2019-02-20 00:00:00Z" # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z" # No number for duration unit
)
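# A valid specification, by contrast, combines two of start/end/duration,
# e.g. "2019-01-01 00:00:00Z/P1M" (a start plus a one-month duration).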
function test_dates() {
# Ensure invalid period specifications are rejected
for spec in '' "${INVALID_PERIODS[@]}"; do
desc="Invalid period - [$spec]"
cmd="iso8601 -p \"$spec\""
test_assert $CRM_EX_INVALID_PARAM 0
done
desc="2014-01-01 00:30:00 - 1 Hour"
cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - Feb 29 in leap year"
cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - using 'T' and offset"
cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
test_assert $CRM_EX_OK 0
desc="24:00:00 equivalent to 00:00:00 of next day"
cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
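# ISO week dates are easy to get wrong near year boundaries, so verify
# that the first week's Sunday (-7) and Monday (-1) of several years
# parse correctly and survive a round-trip back to week-date form.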
for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
desc="20$y-W01-7"
cmd="iso8601 -d '20$y-W01-7 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-7 - round-trip"
cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1"
cmd="iso8601 -d '20$y-W01-1 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1 - round-trip"
cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
test_assert $CRM_EX_OK 0
done
desc="2009-W53-07"
cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="epoch + 2 Years 5 Months 6 Minutes"
cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
test_assert $CRM_EX_OK 0
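# The next several checks exercise month arithmetic from a month-end
# date: when the target month is shorter, the result is clamped to its
# last valid day (Jan 31 + 1 month = Feb 28, + 3 months = Apr 30).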
desc="2009-01-31 + 1 Month"
cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 2 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 3 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-03-31 - 1 Month"
cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2038-01-01 + 3 Months"
cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
}
function test_acl_loop() {
local TMPXML
TMPXML="$1"
# Make sure we're rejecting things for the right reasons
export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
export PCMK_stderr=1
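# PCMK_trace_functions restricts trace logging to the named ACL-check
# functions, and PCMK_stderr=1 routes that logging to stderr so the
# reason for each denial shows up in the captured test output.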
CIB_user=root cibadmin --replace --xml-text '<resources/>'
### no ACL ###
export CIB_user=unknownguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
export CIB_user=root
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v true"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text '<primitive id=\"dummy\" class=\"ocf\" provider=\"pacemaker\" type=\"Dummy\"/>'"
test_assert $CRM_EX_OK
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Started"
test_assert $CRM_EX_OK
### read //meta_attributes ###
export CIB_user=badidea
desc="$CIB_user: Query configuration - implied deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
### deny /cib, read //meta_attributes ###
export CIB_user=betteridea
desc="$CIB_user: Query configuration - explicit deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text '<acls/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Replace - remove acls"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create resource"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text '<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### admin role ###
export CIB_user=bob
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### super_user role ###
export CIB_user=joe
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_writer role ###
export CIB_user=mike
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_denied role ###
export CIB_user=chris
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
}
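# test_acls: build a shadow CIB containing a set of ACL users and roles,
# then run test_acl_loop over them twice: once against the pacemaker-1.3
# schema and again after upgrading to the latest schema.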
function test_acls() {
local SHADOWPATH
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.3 2>&1
export CIB_shadow=$shadow
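# One test user per ACL situation under test:
#   l33t-haxor - explicit deny on /cib (no access at all)
#   niceguy    - observer: read /cib, write stonith-enabled and target-role
#   bob        - admin: read /cib, write //resources
#   joe        - super_user: write /cib
#   mike       - rsc_writer: deny /cib, but write //resources
#   chris      - rsc_denied: write /cib, but deny //resources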
cat <<EOF > "$TMPXML"
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
</acls>
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="New ACL"
cmd="cibadmin --create -o acls --xml-text '<acl_user id=\"badidea\"><read id=\"badidea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
test_assert $CRM_EX_OK
desc="Another ACL"
cmd="cibadmin --create -o acls --xml-text '<acl_user id=\"betteridea\"><read id=\"betteridea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
test_assert $CRM_EX_OK
desc="Updated ACL"
cmd="cibadmin --replace -o acls --xml-text '<acl_user id=\"betteridea\"><deny id=\"betteridea-nothing\" xpath=\"/cib\"/><read id=\"betteridea-resources\" xpath=\"//meta_attributes\"/></acl_user>'"
test_assert $CRM_EX_OK
test_acl_loop "$TMPXML"
printf "\n\n !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
export CIB_user=root
desc="$CIB_user: Upgrade to latest CIB schema"
cmd="cibadmin --upgrade --force -V"
test_assert $CRM_EX_OK
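# The forced upgrade bumps epoch/admin_epoch; rewrite them in the shadow
# file, presumably so the second ACL pass matches the expected output.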
SHADOWPATH="$(crm_shadow --file)"
# sed -i isn't portable :-(
cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # to keep permissions
sed -e 's/epoch=.2/epoch=\"6/g' -e 's/admin_epoch=.1/admin_epoch=\"0/g' \
"$SHADOWPATH" > "${SHADOWPATH}.$$"
mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
test_acl_loop "$TMPXML"
unset CIB_shadow_dir
rm -f "$TMPXML"
}
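# test_validity: feed deliberately broken configurations to cibadmin and
# crm_simulate and check that schema validation rejects (or tolerates)
# each one with the expected exit code.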
function test_validity() {
local TMPGOOD
local TMPBAD
TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.2 2>&1
export CIB_shadow=$shadow
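# Trace the schema-upgrade code paths so their log messages show up in the
# regression output (PCMK_stderr=1 should route them to stderr).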
export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update
export PCMK_stderr=1
cibadmin -C -o resources --xml-text '<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>'
cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'
cibadmin -C -o constraints --xml-text '<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>'
cibadmin -Q > "$TMPGOOD"
desc="Try to make resulting CIB invalid (enum violation)"
cmd="cibadmin -M -o constraints --xml-text '<rsc_order id=\"ord_1-2\" first=\"dummy1\" first-action=\"break\" then=\"dummy2\"/>'"
test_assert $CRM_EX_CONFIG
sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (enum violation)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid (unrecognized validate-with)"
cmd="cibadmin -M --xml-text '<cib validate-with=\"pacemaker-9999.0\"/>'"
test_assert $CRM_EX_CONFIG
sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
cmd="cibadmin -C -o configuration --xml-text '<tags/>'"
test_assert $CRM_EX_CONFIG
sed 's|</configuration>|<tags/></configuration>|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB valid, although without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with valid CIB, but without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# this will just disable validation and accept the config, outputting
# validation errors
sed -e 's|[ ][ ]*validate-with="[^"]*"||' \
-e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
"$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB invalid, and without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPGOOD" "$TMPBAD"
}
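# test_upgrade: start from the pacemaker-2.10 schema and verify that a
# forced upgrade to the latest schema preserves resource configuration.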
test_upgrade() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-2.10 2>&1
export CIB_shadow=$shadow
desc="Set stonith-enabled=false"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
cat <<EOF > "$TMPXML"
<resources>
<primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
<operations>
<op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
<op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s">
<instance_attributes id="mySmartFuse-inputpower-instanceparams">
<nvpair id="mySmartFuse-inputpower-requires" name="requires" value="inputpower"/>
</instance_attributes>
</op>
<op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s">
<instance_attributes id="mySmartFuse-outputpower-instanceparams">
<nvpair id="mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
</instance_attributes>
</op>
</operations>
<instance_attributes id="mySmartFuse-params">
<nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
</instance_attributes>
<!-- a bit hairy but valid -->
<instance_attributes id-ref="mySmartFuse-outputpower-instanceparams"/>
</primitive>
</resources>
EOF
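# Note the id-ref above: it points at an op's instance_attributes block,
# so the upgrade transform must keep that reference resolvable for the
# later crm_resource query to succeed.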
desc="Configure the initial resource"
cmd="cibadmin -M -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
cmd="cibadmin --upgrade --force -V -V"
test_assert $CRM_EX_OK
desc="Query a resource instance attribute (shall survive)"
cmd="crm_resource -r mySmartFuse -g requires"
test_assert $CRM_EX_OK
unset CIB_shadow_dir
rm -f "$TMPXML"
}
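# test_rules: create location constraints with various date_expression
# flavors, then check crm_rule's verdict on each (OK, expired, not yet in
# effect, or unsupported).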
test_rules() {
local TMPXML
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
cibadmin -C -o resources --xml-text '<primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy" />'
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-too-many-date-expressions" rsc="dummy">
<rule id="cli-rule-too-many-date-expressions" score="INFINITY" boolean-op="or">
<date_expression id="cli-date-expression-1" operation="gt" start="2020-01-01 01:00:00 -0500"/>
<date_expression id="cli-date-expression-2" operation="lt" end="2019-01-01 01:00:00 -0500"/>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-prefer-dummy-expired" rsc="dummy">
<rule id="cli-prefer-rule-dummy-expired" score="INFINITY">
<date_expression id="cli-prefer-lifetime-end-dummy-expired" operation="lt" end="2019-01-01 12:00:00 -05:00"/>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
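# GNU date and BSD date spell "one day from now" differently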
if [ "$(uname)" = "FreeBSD" ]; then
tomorrow=$(date -v+1d +"%F %T %z")
else
tomorrow=$(date --date=tomorrow +"%F %T %z")
fi
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-prefer-dummy-not-yet" rsc="dummy">
<rule id="cli-prefer-rule-dummy-not-yet" score="INFINITY">
<date_expression id="cli-prefer-lifetime-end-dummy-not-yet" operation="gt" start="${tomorrow}"/>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-prefer-dummy-date_spec-only-years" rsc="dummy">
<rule id="cli-prefer-rule-dummy-date_spec-only-years" score="INFINITY">
<date_expression id="cli-prefer-dummy-date_spec-only-years-expr" operation="date_spec">
<date_spec id="cli-prefer-dummy-date_spec-only-years-spec" years="2019"/>
</date_expression>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-prefer-dummy-date_spec-without-years" rsc="dummy">
<rule id="cli-prefer-rule-dummy-date_spec-without-years" score="INFINITY">
<date_expression id="cli-prefer-dummy-date_spec-without-years-expr" operation="date_spec">
<date_spec id="cli-prefer-dummy-date_spec-without-years-spec" hours="20" months="1,3,5,7"/>
</date_expression>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-prefer-dummy-date_spec-years-moon" rsc="dummy">
<rule id="cli-prefer-rule-dummy-date_spec-years-moon" score="INFINITY">
<date_expression id="cli-prefer-dummy-date_spec-years-moon-expr" operation="date_spec">
<date_spec id="cli-prefer-dummy-date_spec-years-moon-spec" years="2019" moon="1"/>
</date_expression>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat <<EOF > "$TMPXML"
<rsc_location id="cli-no-date_expression" rsc="dummy">
<rule id="cli-no-date_expression-rule" score="INFINITY">
<expression id="ban-apache-expr" attribute="#uname" operation="eq" value="node3"/>
</rule>
</rsc_location>
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
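# With the constraints in place, exercise crm_rule -c against each rule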
desc="Try to check a rule that doesn't exist"
cmd="crm_rule -c -r blahblah"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule that has too many date_expressions"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
desc="Verify basic rule is expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
test_assert $CRM_EX_EXPIRED
desc="Verify basic rule worked in the past"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
test_assert $CRM_EX_OK
desc="Verify basic rule is not yet in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
test_assert $CRM_EX_NOT_YET_IN_EFFECT
desc="Verify date_spec rule with years has expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED
desc="Verify date_spec rule with years is in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
test_assert $CRM_EX_OK
desc="Try to check a rule whose date_spec does not contain years="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule whose date_spec contains years= and moon="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule with no date_expression"
cmd="crm_rule -c -r cli-no-date_expression-rule"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
unset CIB_shadow_dir
}
# Process command-line arguments
while [ $# -gt 0 ]; do
case "$1" in
-t)
tests="$2"
shift 2
;;
-V|--verbose)
verbose=1
shift
;;
-v|--valgrind)
export G_SLICE=always-malloc
VALGRIND_CMD="valgrind $VALGRIND_OPTS"
shift
;;
-s)
do_save=1
shift
;;
-p)
export PATH="$2:$PATH"
shift
;;
--help)
echo "$USAGE_TEXT"
exit $CRM_EX_OK
;;
*)
echo "error: unknown option $1"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
for t in $tests; do
case "$t" in
dates) ;;
tools) ;;
acls) ;;
validity) ;;
upgrade) ;;
rules) ;;
crm_mon) ;;
*)
echo "error: unknown test $t"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
# Check whether we're running from source directory
SRCDIR=$(dirname $test_home)
if [ -x "$SRCDIR/tools/crm_simulate" ]; then
export PATH="$SRCDIR/tools:$PATH"
echo "Using local binaries from: $SRCDIR/tools"
if [ -x "$SRCDIR/xml" ]; then
export PCMK_schema_directory="$SRCDIR/xml"
echo "Using local schemas from: $PCMK_schema_directory"
fi
fi
for t in $tests; do
echo "Testing $t"
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
eval TMPFILE_$t="$TMPFILE"
test_$t > "$TMPFILE"
# last-run= and last-rc-change= are always numeric in the CIB. However,
# for the crm_mon test we also need to compare against the XML output of
# the crm_mon program, where these are shown as human-readable strings
# (like the output of the `date` command).
sed -e 's/cib-last-written.*>/>/'\
-e 's/ last-run=\"[A-Za-z0-9: ]*\"//'\
-e 's/Last updated: .*/Last updated:/' \
-e 's/Last change: .*/Last change:/' \
-e 's/(version .*)/(version)/' \
-e 's/last_update time=\".*\"/last_update time=\"\"/' \
-e 's/last_change time=\".*\"/last_change time=\"\"/' \
-e 's/ version=\".*\" / version=\"\" /' \
-e 's/request=\".*crm_mon/request=\"crm_mon/' \
-e 's/crm_feature_set="[^"]*" //'\
-e 's/validate-with="[^"]*" //'\
-e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
-e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/ last-rc-change=\"[A-Za-z0-9: ]*\"//'\
-e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
-e 's/^Entity: line [0-9][0-9]*: //'\
-e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
-e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
-e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
-e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
-e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
"$TMPFILE" > "${TMPFILE}.$$"
mv -- "${TMPFILE}.$$" "$TMPFILE"
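# With -s, overwrite the saved expected output with this run's filtered
# results (the diffs below will then trivially pass)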
if [ $do_save -eq 1 ]; then
cp "$TMPFILE" $test_home/cli/regression.$t.exp
fi
done
rm -rf "${shadow_dir}"
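# num_errors/num_passed appear to be maintained by test_assert earlier in
# the script; failed is set here when a test's output differs from the
# saved expected output.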
failed=0
if [ $verbose -eq 1 ]; then
echo -e "\n\nResults"
fi
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
if [ $verbose -eq 1 ]; then
diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
else
diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
fi
if [ $? -ne 0 ]; then
failed=1
fi
done
echo -e "\n\nSummary"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
grep -e '^\* \(Passed\|Failed\)' "$TMPFILE"
done
if [ $num_errors -ne 0 ]; then
echo "$num_errors tests failed; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_ERROR
elif [ $failed -eq 1 ]; then
echo "$num_passed tests passed but output was unexpected; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_DIGEST
else
echo "$num_passed tests passed"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
rm -f "$TMPFILE"
done
crm_shadow --force --delete $shadow >/dev/null 2>&1
exit $CRM_EX_OK
fi
