diff --git a/cts/cli/crm_mon-partial.xml b/cts/cli/crm_mon-partial.xml
index bc60e3a22a..3e76836c9c 100644
--- a/cts/cli/crm_mon-partial.xml
+++ b/cts/cli/crm_mon-partial.xml
@@ -1,144 +1,155 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index ffb2f80d8e..d8d5d35614 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,197 +1,197 @@
-
-
-
+
+
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
+
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index b36ffa25f5..7e0ec068d0 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3030 +1,3118 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
-
+
-
-
+
+
-
+
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
+
-
+
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
-
+
-
-
+
+
-
+
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
+
-
+
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
* ping (ocf::pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* 1 (ocf::pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf::heartbeat:IPaddr): Active cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
* mysql-proxy (lsb:mysql-proxy): Started
* Node cluster02: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* dummy (ocf::pacemaker:Dummy): Started
* Public-IP (ocf::heartbeat:IPaddr): Started
* Email (lsb:exim): Started
* mysql-proxy (lsb:mysql-proxy): Started
* GuestNode httpd-bundle-0@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-1@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
* 1 (lsb:mysql-proxy): Active
* 1 (ocf::heartbeat:IPaddr): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:ping): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
-
+
-
-
+
+
-
-
-
-
+
+
+
+
+
+
+
-
-
+
+
-
+
-
+
-
+
-
+
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
-
+
-
-
+
+
-
+
-
-
-
-
+
+
+
+
+
+
+
-
-
+
+
-
+
-
+
-
+
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
-
+
-
-
+
+
-
+
-
-
-
-
+
+
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
+
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
-
+
-
-
+
+
-
-
-
-
+
+
+
+
+
+
+
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
- * inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
- * Clone Set: inactive-clone-master [inactive-clone] (promotable):
- * Stopped: [ cluster01 cluster02 ]
+ * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
+ * Stopped (disabled): [ cluster01 cluster02 ]
+ * Resource Group: inactive-group (disabled):
+ * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
- * inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
- * Clone Set: inactive-clone-master [inactive-clone] (promotable):
- * Stopped: [ cluster02 ]
+ * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
+ * Stopped (disabled): [ cluster02 ]
+ * Resource Group: inactive-group (disabled):
+ * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
- * inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
- * Clone Set: inactive-clone-master [inactive-clone] (promotable):
- * Stopped: [ cluster01 cluster02 ]
+ * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
+ * Stopped (disabled): [ cluster01 cluster02 ]
+ * Resource Group: inactive-group (disabled):
+ * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf::pacemaker:remote): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[1]
* httpd (ocf::heartbeat:apache): Stopped
* Replica[2]
* httpd (ocf::heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
-
+
-
-
+
+
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by clone name in cloned group
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
-
+
-
-
+
+
-
-
+
+
-
+
-
+
-
+
-
+
=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by group name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
-
+
-
-
+
+
-
-
+
+
=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Resource Group: mysql-group:2:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:3:
* mysql-proxy (lsb:mysql-proxy): Stopped
* Resource Group: mysql-group:4:
* mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
-
+
-
-
+
+
-
-
+
+
-
+
-
+
-
+
-
+
=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by primitive name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 26 resource instances configured (1 DISABLED)
+ * 27 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
-
+
-
-
+
+
-
-
+
+
=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
- * 11 resource instances configured
+ * 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+ * Resource Group: partially-active-group:
+ * dummy-1 (ocf::pacemaker:Dummy): Started cluster02
+ * dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
-
+
-
-
+
+
-
+
-
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
- * 11 resource instances configured
+ * 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+ * Resource Group: partially-active-group:
+ * dummy-1 (ocf::pacemaker:Dummy): Started cluster02
+ * dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
- * 11 resource instances configured
+ * 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+ * Resource Group: partially-active-group:
+ * 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
+ * dummy-1: migration-threshold=1000000:
+ * (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
- * 11 resource instances configured
+ * 13 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (ocf::pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (ocf::heartbeat:IPaddr2): Active
* 1 (ocf::heartbeat:docker): Active
+ * 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf::heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+ * Resource Group: partially-active-group:
+ * 1/2 (ocf::pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
+ * dummy-1: migration-threshold=1000000:
+ * (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
- * 11 resource instances configured
+ * 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#=
-
+
-
-
+
+
-
+
=#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, filtered by node
+=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 27 resource instances configured (4 DISABLED)
+
+ *** Resource management is DISABLED ***
+ The cluster will not attempt to start, stop or recover services
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Clone Set: ping-clone [ping] (unmanaged):
+ * ping (ocf::pacemaker:ping): Started cluster02 (unmanaged)
+ * ping (ocf::pacemaker:ping): Started cluster01 (unmanaged)
+ * Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
+ * dummy (ocf::pacemaker:Dummy): Started cluster02 (unmanaged)
+ * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
+ * Stopped (disabled): [ cluster01 cluster02 ]
+ * Resource Group: inactive-group (unmanaged) (disabled):
+ * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
+ * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged)
+ * Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
+ * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped (unmanaged)
+ * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped (unmanaged)
+ * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped (unmanaged)
+ * Resource Group: exim-group (unmanaged):
+ * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 (unmanaged)
+ * Email (lsb:exim): Started cluster02 (unmanaged)
+ * Clone Set: mysql-clone-group [mysql-group] (unmanaged):
+ * Resource Group: mysql-group:0 (unmanaged):
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
+ * Resource Group: mysql-group:1 (unmanaged):
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
+=#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of all resources with maintenance-mode enabled
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index bec06c8a96..7cb78a97be 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,1668 +1,1679 @@
#!@BASH_PATH@
#
# Copyright 2008-2020 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
#
# Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
# compatible functionality. Do not use the -i option, alternation (\|),
# \0, or character sequences such as \n or \s.
#
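# As an illustrative sketch only (not used anywhere in this suite): a portable
# replacement for the non-portable "sed -i FILE" is to write to a temporary
# file and move it into place; "example.conf" below is just a placeholder name.
#
#   tmp=$(mktemp) &&
#       sed 's/old/new/' example.conf > "$tmp" &&
#       mv "$tmp" example.conf
#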
USAGE_TEXT="Usage: cts-cli []
Options:
--help Display this text, then exit
-V, --verbose Display any differences from expected output
-t 'TEST [...]' Run only specified tests (default: 'dates tools crm_mon acls validity upgrade rules')
-p DIR Look for executables in DIR (may be specified multiple times)
-v, --valgrind Run all commands under valgrind
-s Save actual output as expected output"
# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
    test_home="$(dirname "$(readlink -e "$0")")"
else
    test_home="$(dirname "$0")"
fi
: ${shadow=cts-cli}
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
num_errors=0
num_passed=0
verbose=0
tests="dates tools crm_mon acls validity upgrade rules"
do_save=0
VALGRIND_CMD=
VALGRIND_OPTS="
-q
--gen-suppressions=all
--show-reachable=no
--leak-check=full
--trace-children=no
--time-stamp=yes
--num-callers=20
--suppressions=$test_home/valgrind-pcmk.suppressions
"
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_USAGE=64
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111
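# Illustrative sketch only (not executed here): test_assert() below compares a
# command's status against these constants and renders it with crm_error, the
# same call it already makes for the "End test" banner, e.g.:
#
#   crm_error --exit $CRM_EX_NOSUCH    # prints the message for exit code 105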
function test_assert() {
    target=$1; shift
    cib=$1; shift
    app=`echo "$cmd" | sed 's/\ .*//'`

    printf "* Running: $app - $desc\n" 1>&2

    printf "=#=#=#= Begin test: $desc =#=#=#=\n"
    eval $VALGRIND_CMD $cmd 2>&1
    rc=$?

    if [ x$cib != x0 ]; then
        printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
        CIB_user=root cibadmin -Q
    fi

    printf "=#=#=#= End test: $desc - $(crm_error --exit $rc) ($rc) =#=#=#=\n"

    if [ $rc -ne $target ]; then
        num_errors=$(( $num_errors + 1 ))
        printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
        printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
        return
        exit $CRM_EX_ERROR
    else
        printf "* Passed: %-14s - %s\n" $app "$desc"
        num_passed=$(( $num_passed + 1 ))
    fi
}
function test_crm_mon() {
+ local TMPXML
export CIB_file="$test_home/cli/crm_mon.xml"
desc="Basic text output"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output"
cmd="crm_mon --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Basic text output without node section"
cmd="crm_mon -1 --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="XML output without the node section"
cmd="crm_mon --output-as=xml --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="Text output with only the node section"
cmd="crm_mon -1 --exclude=all --include=nodes"
test_assert $CRM_EX_OK 0
# The above test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
desc="Complete text output"
cmd="crm_mon -1 --include=all"
test_assert $CRM_EX_OK 0
# XML includes everything already so there's no need for a complete test
desc="Complete text output with detail"
cmd="crm_mon -1R --include=all"
test_assert $CRM_EX_OK 0
# XML includes detailed output already
desc="Complete brief text output"
cmd="crm_mon -1 --include=all --brief"
test_assert $CRM_EX_OK 0
desc="Complete text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="XML output grouped by node"
cmd="crm_mon -1 --output-as=xml --group-by-node"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by node"
cmd="crm_mon -1 --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node"
cmd="crm_mon --output-as xml --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by tag"
cmd="crm_mon -1 --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="XML output filtered by tag"
cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by resource tag"
cmd="crm_mon -1 --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource tag"
cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output filtered by node that doesn't exist"
cmd="crm_mon -1 --node=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node that doesn't exist"
cmd="crm_mon --output-as=xml --node=blah"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Basic text output with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster02"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete text output filtered by primitive resource"
cmd="crm_mon -1 --include=all --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="XML output filtered by primitive resource"
cmd="crm_mon --output-as=xml --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by group resource"
cmd="crm_mon -1 --include=all --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource"
cmd="crm_mon --output-as=xml --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by group resource member"
cmd="crm_mon -1 --include=all --resource=Public-IP"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource member"
cmd="crm_mon --output-as=xml --resource=Email"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by clone resource"
cmd="crm_mon -1 --include=all --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource"
cmd="crm_mon --output-as=xml --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by clone resource instance"
cmd="crm_mon -1 --include=all --resource=ping"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by exact clone resource instance"
cmd="crm_mon -1 --include=all --show-detail --resource=ping:0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by exact clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping:1"
test_assert $CRM_EX_OK 0
desc="Basic text output filtered by resource that doesn't exist"
cmd="crm_mon -1 --resource=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource that doesn't exist"
cmd="crm_mon --output-as=xml --resource=blah"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by tag"
cmd="crm_mon -1 -r --resource=inactive-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle resource"
cmd="crm_mon -1 -r --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="XML output filtered by inactive bundle resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled IP address resource"
cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled IP address resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled container"
cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled container"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle connection"
cmd="crm_mon -1 -r --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundle connection"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled primitive resource"
cmd="crm_mon -1 -r --resource=httpd"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled primitive resource"
cmd="crm_mon --output-as=xml --resource=httpd"
test_assert $CRM_EX_OK 0
desc="Complete text output, filtered by clone name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by clone name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-clone-group"
test_assert $CRM_EX_OK 0
desc="Complete text output, filtered by group name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by group name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group"
test_assert $CRM_EX_OK 0
desc="Complete text output, filtered by exact group instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact group instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group:1"
test_assert $CRM_EX_OK 0
desc="Complete text output, filtered by primitive name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by primitive name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy"
test_assert $CRM_EX_OK 0
desc="Complete text output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy:1"
test_assert $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-partial.xml"
desc="Text output of partially active resources"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output of partially active resources"
cmd="crm_mon -1 --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete brief text output, with inactive resources"
cmd="crm_mon -1 -r --include=all --brief"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node, with inactive resources"
cmd="crm_mon -1 -r --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, filtered by node"
cmd="crm_mon -1 --output-as=xml --node=cluster01"
test_assert $CRM_EX_OK 0
unset CIB_file
+
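+ # Make a temporary copy of crm_mon.xml with maintenance-mode flipped to true
+ # and point CIB_file at it for the next test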
+ export CIB_file=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_mon.xml.XXXXXXXXXX)
+ sed -e '/maintenance-mode/ s/false/true/' "$test_home/cli/crm_mon.xml" > $CIB_file
+
+ desc="Text output of all resources with maintenance-mode enabled"
+ cmd="crm_mon -1 -r"
+ test_assert $CRM_EX_OK 0
+
+ rm -r "$CIB_file"
+ unset CIB_file
}
function test_tools() {
local TMPXML
local TMPORIG
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
desc="Validate CIB"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK
desc="Configure something before erasing"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Require --force for CIB erasure"
cmd="cibadmin -E"
test_assert $CRM_EX_UNSAFE
desc="Allow CIB erasure with --force"
cmd="cibadmin -E --force"
test_assert $CRM_EX_OK
desc="Query CIB"
cmd="cibadmin -Q > $TMPORIG"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Query new cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Query cluster options"
cmd="cibadmin -Q -o crm_config > $TMPXML"
test_assert $CRM_EX_OK
desc="Set no-quorum policy"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="Delete nvpair"
cmd="cibadmin -D -o crm_config --xml-text ''"
test_assert $CRM_EX_OK
desc="Create operation should fail"
cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_EXISTS
desc="Modify cluster options section"
cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Query updated cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Set duplicate cluster option"
cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
test_assert $CRM_EX_OK
desc="Setting multiply defined cluster option should fail"
cmd="crm_attribute -n cluster-delay -v 30s"
test_assert $CRM_EX_MULTIPLE
desc="Set cluster option with -s"
cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
test_assert $CRM_EX_OK
desc="Delete cluster option with -i"
cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Create node1 and bring it online"
cmd="crm_simulate --live-check --in-place --node-up=node1"
test_assert $CRM_EX_OK
desc="Create node attribute"
cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
test_assert $CRM_EX_OK
desc="Query new node attribute"
cmd="cibadmin -Q -o nodes | grep node1-ram"
test_assert $CRM_EX_OK
desc="Set a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query a fail count"
cmd="crm_failcount --query -r foo -N node1"
test_assert $CRM_EX_OK
desc="Delete a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -D -N node1 -t status"
test_assert $CRM_EX_OK
desc="Digest calculation"
cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
test_assert $CRM_EX_OK
# This update will fail because it has version numbers
desc="Replace operation should fail"
cmd="cibadmin -R --xml-file $TMPORIG"
test_assert $CRM_EX_OLD
desc="Default standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Set standby status"
cmd="crm_standby -N node1 -v true"
test_assert $CRM_EX_OK
desc="Query standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Delete standby value"
cmd="crm_standby -N node1 -D"
test_assert $CRM_EX_OK
desc="Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g is-managed"
test_assert $CRM_EX_OK
desc="Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create another resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK 0
desc="Show why a resource is not running"
cmd="crm_resource -Y -r dummy"
test_assert $CRM_EX_OK 0
desc="Remove another resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK 0
desc="Create a resource attribute"
cmd="crm_resource -r dummy -p delay -v 10s"
test_assert $CRM_EX_OK
desc="List the configured resources"
cmd="crm_resource -L"
test_assert $CRM_EX_OK
desc="List IDs of instantiated resources"
cmd="crm_resource -l"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource"
cmd="crm_resource -q -r dummy"
test_assert $CRM_EX_OK 0
desc="Require a destination when migrating a resource that is stopped"
cmd="crm_resource -r dummy -M"
test_assert $CRM_EX_USAGE
desc="Don't support migration to non-existent locations"
cmd="crm_resource -r dummy -M -N i.do.not.exist"
test_assert $CRM_EX_NOSUCH
desc="Create a fencing resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Bring resources online"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Try to move a resource to its existing location"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_EXISTS
desc="Move a resource from its existing location"
cmd="crm_resource -r dummy --move"
test_assert $CRM_EX_OK
desc="Clear out constraints generated by --move"
cmd="crm_resource -r dummy --clear"
test_assert $CRM_EX_OK
desc="Default ticket granted state"
cmd="crm_ticket -t ticketA -G granted -d false"
test_assert $CRM_EX_OK
desc="Set ticket granted state"
cmd="crm_ticket -t ticketA -r --force"
test_assert $CRM_EX_OK
desc="Query ticket granted state"
cmd="crm_ticket -t ticketA -G granted"
test_assert $CRM_EX_OK
desc="Delete ticket granted state"
cmd="crm_ticket -t ticketA -D granted --force"
test_assert $CRM_EX_OK
desc="Make a ticket standby"
cmd="crm_ticket -t ticketA -s"
test_assert $CRM_EX_OK
desc="Query ticket standby state"
cmd="crm_ticket -t ticketA -G standby"
test_assert $CRM_EX_OK
desc="Activate a ticket"
cmd="crm_ticket -t ticketA -a"
test_assert $CRM_EX_OK
desc="Delete ticket standby state"
cmd="crm_ticket -t ticketA -D standby"
test_assert $CRM_EX_OK
desc="Ban a resource on unknown node"
cmd="crm_resource -r dummy -B -N host1"
test_assert $CRM_EX_NOSUCH
desc="Create two more nodes and bring them online"
cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
test_assert $CRM_EX_OK
desc="Ban dummy from node1"
cmd="crm_resource -r dummy -B -N node1"
test_assert $CRM_EX_OK
desc="Show where a resource is running"
cmd="crm_resource -r dummy -W"
test_assert $CRM_EX_OK 0
desc="Show constraints on a resource"
cmd="crm_resource -a -r dummy"
test_assert $CRM_EX_OK 0
desc="Ban dummy from node2"
cmd="crm_resource -r dummy -B -N node2"
test_assert $CRM_EX_OK
desc="Relocate resources due to ban"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Move dummy to node1"
cmd="crm_resource -r dummy -M -N node1"
test_assert $CRM_EX_OK
desc="Clear implicit constraints for dummy on node2"
cmd="crm_resource -r dummy -U -N node2"
test_assert $CRM_EX_OK
desc="Drop the status section"
cmd="cibadmin -R -o status --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a clone"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a resource meta attribute"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates (force clone)"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Update child resource meta attribute with duplicates"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute in parent"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update existing resource meta attribute"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the parent"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Copy resources"
cmd="cibadmin -Q -o resources > $TMPXML"
test_assert $CRM_EX_OK 0
desc="Delete resource parent meta attribute (force)"
cmd="crm_resource -r test-clone --meta -d is-managed --force"
test_assert $CRM_EX_OK
desc="Restore duplicates"
cmd="cibadmin -R -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Delete resource child meta attribute"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
cibadmin -C -o resources --xml-text ' \
\
\
'
desc="Create a resource meta attribute in dummy1"
cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in dummy-group"
cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
test_assert $CRM_EX_OK
cibadmin -D -o resource --xml-text ''
desc="Specify a lifetime when moving a resource"
cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
test_assert $CRM_EX_OK
desc="Try to move a resource previously moved with a lifetime"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_OK
desc="Ban dummy from node1 for a short time"
cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
test_assert $CRM_EX_OK
desc="Remove expired constraints"
sleep 2
cmd="crm_resource --clear --expired"
test_assert $CRM_EX_OK
# Clear has already been tested elsewhere, but we need to get rid of the
# constraints so that the delete test works. It won't delete if there's still
# a reference to the resource somewhere.
desc="Clear all implicit constraints for dummy"
cmd="crm_resource -r dummy -U"
test_assert $CRM_EX_OK
desc="Delete a resource"
cmd="crm_resource -D -r dummy -t primitive"
test_assert $CRM_EX_OK
unset CIB_shadow
unset CIB_shadow_dir
rm -f "$TMPXML" "$TMPORIG"
desc="Create an XML patchset"
cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
test_assert $CRM_EX_ERROR 0
}
INVALID_PERIODS=(
"2019-01-01 00:00:00Z" # Start with no end
"2019-01-01 00:00:00Z/" # Start with only a trailing slash
"PT2S/P1M" # Two durations
"2019-13-01 00:00:00Z/P1M" # Out-of-range month
"20191077T15/P1M" # Out-of-range day
"2019-10-01T25:00:00Z/P1M" # Out-of-range hour
"2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z" # Out-of-range minute
"2019-10-01 00:00:80Z/P1M" # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour
"20191001T000010 -00:61/P1M" # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P" # Duration with no values
"P1Z/2019-02-20 00:00:00Z" # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z" # No number for duration unit
)
function test_dates() {
# Ensure invalid period specifications are rejected
for spec in '' "${INVALID_PERIODS[@]}"; do
desc="Invalid period - [$spec]"
cmd="iso8601 -p \"$spec\""
test_assert $CRM_EX_INVALID_PARAM 0
done
desc="2014-01-01 00:30:00 - 1 Hour"
cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - Feb 29 in leap year"
cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - using 'T' and offset"
cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
test_assert $CRM_EX_OK 0
desc="24:00:00 equivalent to 00:00:00 of next day"
cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
desc="20$y-W01-7"
cmd="iso8601 -d '20$y-W01-7 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-7 - round-trip"
cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1"
cmd="iso8601 -d '20$y-W01-1 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1 - round-trip"
cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
test_assert $CRM_EX_OK 0
done
desc="2009-W53-07"
cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="epoch + 2 Years 5 Months 6 Minutes"
cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 1 Month"
cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 2 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 3 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-03-31 - 1 Month"
cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2038-01-01 + 3 Months"
cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
}
function test_acl_loop() {
local TMPXML
TMPXML="$1"
# Make sure we're rejecting things for the right reasons
export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
export PCMK_stderr=1
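# Tracing the two ACL-checking functions (with the log sent to stderr) makes
# the reason each request is allowed or denied part of the captured output.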
CIB_user=root cibadmin --replace --xml-text ''
### no ACL ###
export CIB_user=unknownguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
export CIB_user=root
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v true"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Started"
test_assert $CRM_EX_OK
### read //meta_attributes ###
export CIB_user=badidea
desc="$CIB_user: Query configuration - implied deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
### deny /cib, read //meta_attributes ###
export CIB_user=betteridea
desc="$CIB_user: Query configuration - explicit deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Replace - remove acls"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create resource"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### admin role ###
CIB_user=bob
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### super_user role ###
export CIB_user=joe
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_writer role ###
export CIB_user=mike
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_denied role ###
export CIB_user=chris
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
}
function test_acls() {
local SHADOWPATH
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.3 2>&1
export CIB_shadow=$shadow
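# The ACL tests run twice: first against this old pacemaker-1.3 schema, then
# again after upgrading to the latest schema below.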
cat < "$TMPXML"
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="New ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Another ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Updated ACL"
cmd="cibadmin --replace -o acls --xml-text ''"
test_assert $CRM_EX_OK
test_acl_loop "$TMPXML"
printf "\n\n !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
export CIB_user=root
desc="$CIB_user: Upgrade to latest CIB schema"
cmd="cibadmin --upgrade --force -V"
test_assert $CRM_EX_OK
SHADOWPATH="$(crm_shadow --file)"
# sed -i isn't portable :-(
cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # to keep permissions
sed -e 's/epoch=.2/epoch=\"6/g' -e 's/admin_epoch=.1/admin_epoch=\"0/g' \
"$SHADOWPATH" > "${SHADOWPATH}.$$"
mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
test_acl_loop "$TMPXML"
unset CIB_shadow_dir
rm -f "$TMPXML"
}
function test_validity() {
local TMPGOOD
local TMPBAD
TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.2 2>&1
export CIB_shadow=$shadow
export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update
export PCMK_stderr=1
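# Tracing the schema-upgrade and validation functions makes the reasons for
# validation failures visible in the test output.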
cibadmin -C -o resources --xml-text ''
cibadmin -C -o resources --xml-text ''
cibadmin -C -o constraints --xml-text ''
cibadmin -Q > "$TMPGOOD"
desc="Try to make resulting CIB invalid (enum violation)"
cmd="cibadmin -M -o constraints --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (enum violation)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid (unrecognized validate-with)"
cmd="cibadmin -M --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
cmd="cibadmin -C -o configuration --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|||' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB valid, although without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with valid CIB, but without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# this will just disable validation and accept the config, outputting
# validation errors
sed -e 's|[ ][ ]*validate-with="[^"]*"||' \
-e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
"$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB invalid, and without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPGOOD" "$TMPBAD"
}
test_upgrade() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-2.10 2>&1
export CIB_shadow=$shadow
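# The shadow CIB deliberately uses the older pacemaker-2.10 schema so that the
# "cibadmin --upgrade" test below actually exercises the 2.10 upgrade transform.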
desc="Set stonith-enabled=false"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
cat < "$TMPXML"
EOF
desc="Configure the initial resource"
cmd="cibadmin -M -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
cmd="cibadmin --upgrade --force -V -V"
test_assert $CRM_EX_OK
desc="Query a resource instance attribute (shall survive)"
cmd="crm_resource -r mySmartFuse -g requires"
test_assert $CRM_EX_OK
unset CIB_shadow_dir
rm -f "$TMPXML"
}
test_rules() {
local TMPXML
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
cibadmin -C -o resources --xml-text ''
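# Each constraint below is loaded from a temporary XML file; their rules
# (expired, not yet in effect, various date_spec forms) are what the crm_rule
# checks at the end of this function exercise.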
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
if [ "$(uname)" == "FreeBSD" ]; then
tomorrow=$(date -v+1d +"%F %T %z")
else
tomorrow=$(date --date=tomorrow +"%F %T %z")
fi
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
desc="Try to check a rule that doesn't exist"
cmd="crm_rule -c -r blahblah"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule that has too many date_expressions"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
desc="Verify basic rule is expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
test_assert $CRM_EX_EXPIRED
desc="Verify basic rule worked in the past"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
test_assert $CRM_EX_OK
desc="Verify basic rule is not yet in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
test_assert $CRM_EX_NOT_YET_IN_EFFECT
desc="Verify date_spec rule with years has expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED
desc="Verify date_spec rule with years is in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
test_assert $CRM_EX_OK
desc="Try to check a rule whose date_spec does not contain years="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule whose date_spec contains years= and moon="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule with no date_expression"
cmd="crm_rule -c -r cli-no-date_expression-rule"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
unset CIB_shadow_dir
}
# Process command-line arguments
while [ $# -gt 0 ]; do
case "$1" in
-t)
tests="$2"
shift 2
;;
-V|--verbose)
verbose=1
shift
;;
-v|--valgrind)
export G_SLICE=always-malloc
VALGRIND_CMD="valgrind $VALGRIND_OPTS"
shift
;;
-s)
do_save=1
shift
;;
-p)
export PATH="$2:$PATH"
shift
;;
--help)
echo "$USAGE_TEXT"
exit $CRM_EX_OK
;;
*)
echo "error: unknown option $1"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
for t in $tests; do
case "$t" in
dates) ;;
tools) ;;
acls) ;;
validity) ;;
upgrade) ;;
rules) ;;
crm_mon) ;;
*)
echo "error: unknown test $t"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
# Check whether we're running from source directory
SRCDIR=$(dirname $test_home)
if [ -x "$SRCDIR/tools/crm_simulate" ]; then
export PATH="$SRCDIR/tools:$PATH"
echo "Using local binaries from: $SRCDIR/tools"
if [ -x "$SRCDIR/xml" ]; then
export PCMK_schema_directory="$SRCDIR/xml"
echo "Using local schemas from: $PCMK_schema_directory"
fi
fi
for t in $tests; do
echo "Testing $t"
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
eval TMPFILE_$t="$TMPFILE"
test_$t > "$TMPFILE"
# last-run= and last-rc-change= are always numeric in the CIB. However,
# for the crm_mon test we also need to compare against the XML output of
# the crm_mon program. There, these are shown as human readable strings
# (like the output of the `date` command).
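# The sed below normalizes timestamps, versions, feature sets, and temporary
# file paths so the captured output can be diffed against the stored
# regression.$t.exp files.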
sed -e 's/cib-last-written.*>/>/'\
-e 's/ last-run=\"[A-Za-z0-9: ]*\"//'\
-e 's/Last updated: .*/Last updated:/' \
-e 's/Last change: .*/Last change:/' \
-e 's/(version .*)/(version)/' \
-e 's/last_update time=\".*\"/last_update time=\"\"/' \
-e 's/last_change time=\".*\"/last_change time=\"\"/' \
-e 's/ version=\".*\" / version=\"\" /' \
-e 's/request=\".*crm_mon/request=\"crm_mon/' \
-e 's/crm_feature_set="[^"]*" //'\
-e 's/validate-with="[^"]*" //'\
-e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
-e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/ last-rc-change=\"[A-Za-z0-9: ]*\"//'\
-e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
-e 's/^Entity: line [0-9][0-9]*: //'\
-e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
-e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
-e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
-e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
-e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
"$TMPFILE" > "${TMPFILE}.$$"
mv -- "${TMPFILE}.$$" "$TMPFILE"
if [ $do_save -eq 1 ]; then
cp "$TMPFILE" $test_home/cli/regression.$t.exp
fi
done
rm -rf "${shadow_dir}"
failed=0
if [ $verbose -eq 1 ]; then
echo -e "\n\nResults"
fi
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
if [ $verbose -eq 1 ]; then
diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
else
diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
fi
if [ $? -ne 0 ]; then
failed=1
fi
done
echo -e "\n\nSummary"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
grep -e '^\* \(Passed\|Failed\)' "$TMPFILE"
done
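# Exit status: CRM_EX_ERROR if any command returned an unexpected status,
# CRM_EX_DIGEST if every command passed but the output differed from the
# .exp files, CRM_EX_OK otherwise.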
if [ $num_errors -ne 0 ]; then
echo "$num_errors tests failed; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_ERROR
elif [ $failed -eq 1 ]; then
echo "$num_passed tests passed but output was unexpected; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_DIGEST
else
echo $num_passed tests passed
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
rm -f "$TMPFILE"
done
crm_shadow --force --delete $shadow >/dev/null 2>&1
exit $CRM_EX_OK
fi
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index c7718fa092..703010409b 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1515 +1,1517 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
DESC = """Regression tests for Pacemaker's scheduler"""
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "bug-lf-2613", "Move group on failure" ],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-3", "Params: Restart instead of reload if start pending" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : master" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
[ "rule-dbl-as-auto-number-match",
"Floating-point rule values default to number comparison: match" ],
[ "rule-dbl-as-auto-number-no-match",
"Floating-point rule values default to number comparison: no "
"match" ],
[ "rule-dbl-as-integer-match",
"Floating-point rule values set to integer comparison: match" ],
[ "rule-dbl-as-integer-no-match",
"Floating-point rule values set to integer comparison: no match" ],
[ "rule-dbl-as-number-match",
"Floating-point rule values set to number comparison: match" ],
[ "rule-dbl-as-number-no-match",
"Floating-point rule values set to number comparison: no match" ],
[ "rule-dbl-parse-fail-default-str-match",
"Floating-point rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-dbl-parse-fail-default-str-no-match",
"Floating-point rule values fail to parse, default to string "
"comparison: no match" ],
[ "rule-int-as-auto-integer-match",
"Integer rule values default to integer comparison: match" ],
[ "rule-int-as-auto-integer-no-match",
"Integer rule values default to integer comparison: no match" ],
[ "rule-int-as-integer-match",
"Integer rule values set to integer comparison: match" ],
[ "rule-int-as-integer-no-match",
"Integer rule values set to integer comparison: no match" ],
[ "rule-int-as-number-match",
"Integer rule values set to number comparison: match" ],
[ "rule-int-as-number-no-match",
"Integer rule values set to number comparison: no match" ],
[ "rule-int-parse-fail-default-str-match",
"Integer rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-int-parse-fail-default-str-no-match",
"Integer rule values fail to parse, default to string "
"comparison: no match" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_master_w_native",
"cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
[ "colo_slave_w_native",
"cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
[ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
[ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
[ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-master", "Resource Sets - Master" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
# @TODO: If pacemaker implements versioned attributes, uncomment this test
#[ "migrate-versioned", "Disable migration for versioned resources" ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
[ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
[ "clone-order-instance", "Ordering with specific clone instances" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "master-0", "Stopped -> Slave" ],
[ "master-1", "Stopped -> Promote" ],
[ "master-2", "Stopped -> Promote : notify" ],
[ "master-3", "Stopped -> Promote : master location" ],
[ "master-4", "Started -> Promote : master location" ],
[ "master-5", "Promoted -> Promoted" ],
[ "master-6", "Promoted -> Promoted (2)" ],
[ "master-7", "Promoted -> Fenced" ],
[ "master-8", "Promoted -> Fenced -> Moved" ],
[ "master-9", "Stopped + Promotable + No quorum" ],
[ "master-10", "Stopped -> Promotable : notify with monitor" ],
[ "master-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable master placement" ],
[ "master-12", "Promotion based solely on rsc_location constraints" ],
[ "master-13", "Include preferences of colocated resources when placing master" ],
[ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
[ "master-ordering", "Prevent resources from starting that need a master" ],
[ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
[ "master-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
[ "master-failed-demote", "Don't retry failed demote actions" ],
[ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "master-depend",
"Ensure resources that depend on the master don't get allocated until the master does" ],
[ "master-reattach", "Re-attach to a running master" ],
[ "master-allow-start", "Don't include master score if it would prevent allocation" ],
[ "master-colocation",
"Allow master instances placemaker to be influenced by colocation constraints" ],
[ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "master-role", "Prevent target-role from promoting more than master-max instances" ],
[ "bug-lf-2358", "Master-Master anti-colocation" ],
[ "master-promotion-constraint", "Mandatory master colocation constraints" ],
[ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
[ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
[ "master-demote-2", "Demote does not clear past failure" ],
[ "master-move", "Move master based on failure of colocated group" ],
[ "master-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by order constraint" ],
[ "order_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
[ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
[ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "master-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "master-stop", "Stop instances due to location constraint with role=Started" ],
[ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "master-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "master-notify", "Master promotion with notifies" ],
[ "master-score-startup", "Use permanent master scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
[ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
[ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
[ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
[ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
[ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
[ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
[ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on master/slave" ],
[ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
[ "bug-1685", "Depends-on-master ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "bug-5007-masterslave_colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
[ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
[ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
[ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
[ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
[ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
[ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
[ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
[ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
[ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
[ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
[ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
[ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
[ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
[ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
[ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
[ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
[ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
[ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
[ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
[ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
[ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
[ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
[ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
[ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
+ [ [ "stop-all-resources", "Test stop-all-resources=true "],
+ ],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
# [ "versioned-resources", "Start resources with #ra-version rules" ],
# [ "restart-versioned", "Restart resources on #ra-version change" ],
# [ "reload-versioned", "Reload resources on #ra-version change" ],
#],
#[
# [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
# [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
# [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
#],
]
TESTS_64BIT = [
[
[ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
],
]
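For reference, each entry in the tables above is a [ name, description ] pair; run_all() below also honors an optional third element listing extra crm_simulate arguments, which run_one() appends to the simulator command line. A minimal sketch of that convention, using a hypothetical test name ("some-new-test") and crm_simulate's --show-utilization option purely as an illustrative argument:
example_group = [
    # Hypothetical entry: the optional third element becomes test_args in run_one()
    [ "some-new-test", "Description of the scenario", [ "--show-utilization" ] ],
]
for test in example_group:
    try:
        extra_args = test[2]      # mirrors the lookup in run_all() below
    except IndexError:
        extra_args = []
    print(test[0], test[1], extra_args)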
# Constants substituted in the build process
class BuildVars(object):
SBINDIR = "@sbindir@"
BUILDDIR = "@abs_top_builddir@"
CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"
# These values must be kept in sync with include/crm/crm.h
class CrmExit(object):
OK = 0
ERROR = 1
NOT_INSTALLED = 5
NOINPUT = 66
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
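# Attributes whose values are not significant when comparing scheduler output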
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so grab that
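# (e.g. "cts-scheduler --run migrate-1 --show-scores" should leave "--show-scores" for crm_simulate)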
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
self.args = parser.parse_args(argv[1:])
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(CrmExit.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
BuildVars.CRM_SCHEMA_DIRECTORY ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
def __init__(self, argv=sys.argv):
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
with io.open("/dev/null", "wt") as dev_null:
if diff(filename1, filename2, stdout=dev_null) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
self.failed_file.write("\n")
return True
return False
def run_one(self, test_name, test_desc, test_args=[]):
""" Run one scheduler test """
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
# (Intermediate) test outputs
output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return CrmExit.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return CrmExit.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != CrmExit.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return CrmExit.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return CrmExit.ERROR
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return CrmExit.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return CrmExit.ERROR
return CrmExit.OK
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group:
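# Each test is [ name, description ] with an optional third element of extra crm_simulate arguments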
try:
args = test[2]
except IndexError:
args = []
self.run_one(test[0], test[1], args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args != []:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
return CrmExit.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
os.remove(self.failed_filename)
return CrmExit.ERROR
def run(self):
""" Run test(s) as specified """
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
cat(self.failed_filename)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/stop-all-resources.dot b/cts/scheduler/stop-all-resources.dot
new file mode 100644
index 0000000000..897b0ad9f6
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.dot
@@ -0,0 +1,38 @@
+ digraph "g" {
+"Email_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Email_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"Fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"Public-IP_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Public-IP_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-0_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-0_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-1_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-1_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-2_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-2_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-docker-0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-1_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-1_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.131_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.131_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.132_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.132_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.133_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.133_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dhcpd:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dhcpd:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-1_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-1_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"mysql-proxy:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"mysql-proxy:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/stop-all-resources.exp b/cts/scheduler/stop-all-resources.exp
new file mode 100644
index 0000000000..478a28b302
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.exp
@@ -0,0 +1,272 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cts/scheduler/stop-all-resources.scores b/cts/scheduler/stop-all-resources.scores
new file mode 100644
index 0000000000..5669c11b56
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.scores
@@ -0,0 +1,166 @@
+Allocation scores:
+pcmk__bundle_allocate: httpd-bundle allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500
+pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500
+pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500
+pcmk__clone_allocate: httpd-bundle-clone allocation score on cluster01: -INFINITY
+pcmk__clone_allocate: httpd-bundle-clone allocation score on cluster02: -INFINITY
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: 0
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: 0
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: 0
+pcmk__clone_allocate: httpd:0 allocation score on httpd-bundle-0: INFINITY
+pcmk__clone_allocate: httpd:1 allocation score on httpd-bundle-1: INFINITY
+pcmk__clone_allocate: httpd:2 allocation score on httpd-bundle-2: INFINITY
+pcmk__clone_allocate: inactive-clone allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-clone allocation score on cluster02: 0
+pcmk__clone_allocate: inactive-dhcpd:0 allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-dhcpd:0 allocation score on cluster02: 0
+pcmk__clone_allocate: inactive-dhcpd:1 allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-dhcpd:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-clone-group allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-clone-group allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:0 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:0 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:1 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:2 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:2 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:3 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:3 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:4 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:4 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__clone_allocate: ping-clone allocation score on cluster01: 0
+pcmk__clone_allocate: ping-clone allocation score on cluster02: 0
+pcmk__clone_allocate: ping:0 allocation score on cluster01: 0
+pcmk__clone_allocate: ping:0 allocation score on cluster02: 0
+pcmk__clone_allocate: ping:1 allocation score on cluster01: 0
+pcmk__clone_allocate: ping:1 allocation score on cluster02: 0
+pcmk__group_allocate: Email allocation score on cluster01: 0
+pcmk__group_allocate: Email allocation score on cluster02: 0
+pcmk__group_allocate: Public-IP allocation score on cluster01: 0
+pcmk__group_allocate: Public-IP allocation score on cluster02: 0
+pcmk__group_allocate: exim-group allocation score on cluster01: 0
+pcmk__group_allocate: exim-group allocation score on cluster02: 0
+pcmk__group_allocate: inactive-dummy-1 allocation score on cluster01: 0
+pcmk__group_allocate: inactive-dummy-1 allocation score on cluster02: 0
+pcmk__group_allocate: inactive-dummy-2 allocation score on cluster01: 0
+pcmk__group_allocate: inactive-dummy-2 allocation score on cluster02: 0
+pcmk__group_allocate: inactive-group allocation score on cluster01: 0
+pcmk__group_allocate: inactive-group allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:0 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:0 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:1 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:1 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:2 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:2 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:3 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:3 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:4 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:4 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__native_allocate: Email allocation score on cluster01: -INFINITY
+pcmk__native_allocate: Email allocation score on cluster02: -INFINITY
+pcmk__native_allocate: Fencing allocation score on cluster01: 0
+pcmk__native_allocate: Fencing allocation score on cluster02: 0
+pcmk__native_allocate: Public-IP allocation score on cluster01: 0
+pcmk__native_allocate: Public-IP allocation score on cluster02: 0
+pcmk__native_allocate: dummy allocation score on cluster01: 0
+pcmk__native_allocate: dummy allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-0 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-0 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-1 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-1 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-2 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-2 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd:0 allocation score on httpd-bundle-0: INFINITY
+pcmk__native_allocate: httpd:1 allocation score on httpd-bundle-1: INFINITY
+pcmk__native_allocate: httpd:2 allocation score on httpd-bundle-2: INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__native_allocate: ping:0 allocation score on cluster01: 0
+pcmk__native_allocate: ping:0 allocation score on cluster02: 0
+pcmk__native_allocate: ping:1 allocation score on cluster01: 0
+pcmk__native_allocate: ping:1 allocation score on cluster02: 0
diff --git a/cts/scheduler/stop-all-resources.summary b/cts/scheduler/stop-all-resources.summary
new file mode 100644
index 0000000000..fa4ca66344
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.summary
@@ -0,0 +1,80 @@
+4 of 27 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ Clone Set: ping-clone [ping]
+ Stopped: [ cluster01 cluster02 ]
+ Fencing (stonith:fence_xvm): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Clone Set: inactive-clone [inactive-dhcpd]
+ Stopped (disabled): [ cluster01 cluster02 ]
+ Resource Group: inactive-group
+ inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
+ Container bundle set: httpd-bundle [pcmk:http]
+ httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
+ Resource Group: exim-group
+ Public-IP (ocf::heartbeat:IPaddr): Stopped
+ Email (lsb:exim): Stopped
+ Clone Set: mysql-clone-group [mysql-group]
+ Stopped: [ cluster01 cluster02 ]
+
+Transition Summary:
+
+Executing cluster transition:
+ * Resource action: ping:0 monitor on cluster02
+ * Resource action: ping:0 monitor on cluster01
+ * Resource action: Fencing monitor on cluster02
+ * Resource action: Fencing monitor on cluster01
+ * Resource action: dummy monitor on cluster02
+ * Resource action: dummy monitor on cluster01
+ * Resource action: inactive-dhcpd:0 monitor on cluster02
+ * Resource action: inactive-dhcpd:0 monitor on cluster01
+ * Resource action: inactive-dummy-1 monitor on cluster02
+ * Resource action: inactive-dummy-1 monitor on cluster01
+ * Resource action: inactive-dummy-2 monitor on cluster02
+ * Resource action: inactive-dummy-2 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster01
+ * Resource action: httpd-bundle-docker-0 monitor on cluster02
+ * Resource action: httpd-bundle-docker-0 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster01
+ * Resource action: httpd-bundle-docker-1 monitor on cluster02
+ * Resource action: httpd-bundle-docker-1 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster01
+ * Resource action: httpd-bundle-docker-2 monitor on cluster02
+ * Resource action: httpd-bundle-docker-2 monitor on cluster01
+ * Resource action: Public-IP monitor on cluster02
+ * Resource action: Public-IP monitor on cluster01
+ * Resource action: Email monitor on cluster02
+ * Resource action: Email monitor on cluster01
+ * Resource action: mysql-proxy:0 monitor on cluster02
+ * Resource action: mysql-proxy:0 monitor on cluster01
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ Clone Set: ping-clone [ping]
+ Stopped: [ cluster01 cluster02 ]
+ Fencing (stonith:fence_xvm): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Clone Set: inactive-clone [inactive-dhcpd]
+ Stopped (disabled): [ cluster01 cluster02 ]
+ Resource Group: inactive-group
+ inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
+ Container bundle set: httpd-bundle [pcmk:http]
+ httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
+ Resource Group: exim-group
+ Public-IP (ocf::heartbeat:IPaddr): Stopped
+ Email (lsb:exim): Stopped
+ Clone Set: mysql-clone-group [mysql-group]
+ Stopped: [ cluster01 cluster02 ]
+
diff --git a/cts/scheduler/stop-all-resources.xml b/cts/scheduler/stop-all-resources.xml
new file mode 100644
index 0000000000..6ecd4d6d73
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.xml
@@ -0,0 +1,107 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h
index de2defff1c..2874259b62 100644
--- a/include/crm/common/output_internal.h
+++ b/include/crm/common/output_internal.h
@@ -1,767 +1,767 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_OUTPUT__H
# define CRM_OUTPUT__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Formatted output for pacemaker tools
*/
# include
# include
# include
# include
# include
# include
-# define PCMK__API_VERSION "2.2"
+# define PCMK__API_VERSION "2.3"
#if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS)
# define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS)))
#else
# define PCMK__OUTPUT_ARGS(ARGS...)
#endif
typedef struct pcmk__output_s pcmk__output_t;
/*!
* \internal
* \brief The type of a function that creates a ::pcmk__output_t.
*
* Instances of this type are passed to pcmk__register_format(), stored in an
* internal data structure, and later accessed by pcmk__output_new(). For
* examples, see pcmk__mk_xml_output() and pcmk__mk_text_output().
*
* \param[in] argv The list of command line arguments.
*/
typedef pcmk__output_t * (*pcmk__output_factory_t)(char **argv);
/*!
* \internal
* \brief The type of a custom message formatting function.
*
* These functions are defined by various libraries to support formatting of
* types aside from the basic types provided by a ::pcmk__output_t.
*
* The meaning of the return value will be different for each message.
* In general, however, 0 should be returned on success and a positive value
* on error.
*
* \note These functions must not call va_start or va_end - that is done
* automatically before the custom formatting function is called.
*/
typedef int (*pcmk__message_fn_t)(pcmk__output_t *out, va_list args);
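/* An illustrative sketch of such a function (the message name, argument, and
 * function name below are hypothetical, not part of this API):
 *
 * \code
 * static int
 * node_count_text(pcmk__output_t *out, va_list args)
 * {
 *     int nodes = va_arg(args, int);
 *
 *     out->info(out, "%d nodes configured", nodes);
 *     return pcmk_rc_ok;
 * }
 * \endcode
 */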
/*!
* \internal
* \brief Internal type for tracking custom messages.
*
* Each library can register functions that format custom message types. These
* are commonly used to handle some library-specific type. Registration is
* done by first defining a table of ::pcmk__message_entry_t structures and
* then passing that table to pcmk__register_messages(). Separate handlers
* can be defined for the same message, but for different formats (xml vs.
* text). Unknown formats will be ignored.
*
* Additionally, a "default" value for fmt_table can be used. In this case,
* fn will be registered for all supported formats. It is also possible to
* register a default and then override that registration with a format-specific
* function if necessary.
*
* \note The ::pcmk__message_entry_t table is processed in one pass, in order,
* from top to bottom. This means later entries with the same message_id will
* override previous ones. Thus, any default entry must come before any
* format-specific entries for the same message_id.
*/
typedef struct pcmk__message_entry_s {
/*!
* \brief The message to be handled.
*
* This must be the same ID that is passed to the message function of
* a ::pcmk__output_t. Unknown message IDs will be ignored.
*/
const char *message_id;
/*!
* \brief The format type this handler is for.
*
* This name must match the fmt_name of the currently active formatter in
* order for the registered function to be called. It is valid to have
* multiple entries for the same message_id but with different fmt_name
* values.
*/
const char *fmt_name;
/*!
* \brief The function to be called for message_id given a match on
* fmt_name. See comments on ::pcmk__message_fn_t.
*/
pcmk__message_fn_t fn;
} pcmk__message_entry_t;
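/* A hypothetical registration table illustrating the ordering rule above: a
 * "default" entry first, then a format-specific override for the same
 * message_id, terminated with a NULL entry.  The message ID and function
 * names are examples only:
 *
 * \code
 * static pcmk__message_entry_t fmt_functions[] = {
 *     { "node-count", "default", node_count_text },
 *     { "node-count", "xml",     node_count_xml },
 *     { NULL, NULL, NULL }
 * };
 *
 * pcmk__register_messages(out, fmt_functions);
 * \endcode
 */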
/*!
* \internal
* \brief This structure contains everything needed to add support for a
* single output formatter to a command line program.
*/
typedef struct pcmk__supported_format_s {
/*!
* \brief The name of this output formatter, which should match the
* fmt_name parameter in some ::pcmk__output_t structure.
*/
const char *name;
/*!
* \brief A function that creates a ::pcmk__output_t.
*/
pcmk__output_factory_t create;
/*!
* \brief Format-specific command line options. This can be NULL if
* no command line options should be supported.
*/
GOptionEntry *options;
} pcmk__supported_format_t;
/* The following three blocks need to be updated each time a new base formatter
* is added.
*/
extern GOptionEntry pcmk__html_output_entries[];
extern GOptionEntry pcmk__log_output_entries[];
extern GOptionEntry pcmk__none_output_entries[];
extern GOptionEntry pcmk__text_output_entries[];
extern GOptionEntry pcmk__xml_output_entries[];
pcmk__output_t *pcmk__mk_html_output(char **argv);
pcmk__output_t *pcmk__mk_log_output(char **argv);
pcmk__output_t *pcmk__mk_none_output(char **argv);
pcmk__output_t *pcmk__mk_text_output(char **argv);
pcmk__output_t *pcmk__mk_xml_output(char **argv);
#define PCMK__SUPPORTED_FORMAT_HTML { "html", pcmk__mk_html_output, pcmk__html_output_entries }
#define PCMK__SUPPORTED_FORMAT_LOG { "log", pcmk__mk_log_output, pcmk__log_output_entries }
#define PCMK__SUPPORTED_FORMAT_NONE { "none", pcmk__mk_none_output, pcmk__none_output_entries }
#define PCMK__SUPPORTED_FORMAT_TEXT { "text", pcmk__mk_text_output, pcmk__text_output_entries }
#define PCMK__SUPPORTED_FORMAT_XML { "xml", pcmk__mk_xml_output, pcmk__xml_output_entries }
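/* Typical use of the macros above, as a sketch: build a NULL-terminated table
 * of the formats a tool supports and register them all at once.  The
 * output_group variable is a placeholder for the tool's GOptionGroup (it may
 * be NULL outside of command line programs):
 *
 * \code
 * static pcmk__supported_format_t formats[] = {
 *     PCMK__SUPPORTED_FORMAT_TEXT,
 *     PCMK__SUPPORTED_FORMAT_XML,
 *     { NULL, NULL, NULL }
 * };
 *
 * pcmk__register_formats(output_group, formats);
 * \endcode
 */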
/*!
* \brief This structure contains everything that makes up a single output
* formatter.
*
* Instances of this structure may be created by calling pcmk__output_new()
* with the name of the desired formatter. They should later be freed with
* pcmk__output_free().
*/
struct pcmk__output_s {
/*!
* \brief The name of this output formatter.
*/
const char *fmt_name;
/*!
     * \brief Should this formatter suppress most output?
*
* \note This setting is not respected by all formatters. In general,
* machine-readable output formats will not support this while
* user-oriented formats will. Callers should use is_quiet()
* to test whether to print or not.
*/
bool quiet;
/*!
* \brief A copy of the request that generated this output.
*
* In the case of command line usage, this would be the command line
* arguments. For other use cases, it could be different.
*/
gchar *request;
/*!
* \brief Where output should be written.
*
* This could be a file handle, or stdout or stderr. This is really only
* useful internally.
*/
FILE *dest;
/*!
* \brief Custom messages that are currently registered on this formatter.
*
* Keys are the string message IDs, values are ::pcmk__message_fn_t function
* pointers.
*/
GHashTable *messages;
/*!
* \brief Implementation-specific private data.
*
* Each individual formatter may have some private data useful in its
* implementation. This points to that data. Callers should not rely on
* its contents or structure.
*/
void *priv;
/*!
* \internal
* \brief Take whatever actions are necessary to prepare out for use. This is
* called by pcmk__output_new(). End users should not need to call this.
*
* \note For formatted output implementers - This function should be written in
* such a way that it can be called repeatedly on an already initialized
* object without causing problems, or on a previously finished object
* without crashing.
*
* \param[in,out] out The output functions structure.
*
* \return true on success, false on error.
*/
bool (*init) (pcmk__output_t *out);
/*!
* \internal
* \brief Free the private formatter-specific data.
*
* This is called from pcmk__output_free() and does not typically need to be
* called directly.
*
* \param[in,out] out The output functions structure.
*/
void (*free_priv) (pcmk__output_t *out);
/*!
* \internal
* \brief Take whatever actions are necessary to end formatted output.
*
* This could include flushing output to a file, but does not include freeing
* anything. The finish method can potentially be fairly complicated, adding
* additional information to the internal data structures or doing whatever
* else. It is therefore suggested that finish only be called once.
*
* \note The print parameter will only affect those formatters that do all
* their output at the end. Console-oriented formatters typically print
* a line at a time as they go, so this parameter will not affect them.
* Structured formatters will honor it, however.
*
* \note The copy_dest parameter does not apply to all formatters. Console-
* oriented formatters do not build up a structure as they go, and thus
* do not have anything to return. Structured formatters will honor it,
* however. Note that each type of formatter will return a different
* type of value in this parameter. To use this parameter, call this
* function like so:
*
* \code
* xmlNode *dest = NULL;
* out->finish(out, exit_code, false, (void **) &dest);
* \endcode
*
* \param[in,out] out The output functions structure.
* \param[in] exit_status The exit value of the whole program.
* \param[in] print Whether this function should write any output.
* \param[out] copy_dest A destination to store a copy of the internal
* data structure for this output, or NULL if no
* copy is required. The caller should free this
* memory when done with it.
*/
void (*finish) (pcmk__output_t *out, crm_exit_t exit_status, bool print,
void **copy_dest);
/*!
* \internal
* \brief Finalize output and then immediately set back up to start a new set
* of output.
*
* This is conceptually the same as calling finish and then init, though in
     * practice more may be happening behind the scenes.
*
* \note This function differs from finish in that no exit_status is added.
* The idea is that the program is not shutting down, so there is not
* yet a final exit code. Call finish on the last time through if this
* is needed.
*
* \param[in,out] out The output functions structure.
*/
void (*reset) (pcmk__output_t *out);
/*!
* \internal
* \brief Register a custom message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The name of the message to register. This name
* will be used as the message_id parameter to the
* message function in order to call the custom
* format function.
* \param[in] fn The custom format function to call for message_id.
*/
void (*register_message) (pcmk__output_t *out, const char *message_id,
pcmk__message_fn_t fn);
/*!
* \internal
* \brief Call a previously registered custom message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The name of the message to call. This name must
* be the same as the message_id parameter of some
* previous call to register_message.
* \param[in] ... Arguments to be passed to the registered function.
*
* \return A standard Pacemaker return code. Generally: 0 if a function was
* registered for the message, that function was called, and returned
* successfully; EINVAL if no function was registered; or pcmk_rc_no_output
* if a function was called but produced no output.
*/
int (*message) (pcmk__output_t *out, const char *message_id, ...);
/*!
* \internal
* \brief Format the output of a completed subprocess.
*
* \param[in,out] out The output functions structure.
* \param[in] exit_status The exit value of the subprocess.
* \param[in] proc_stdout stdout from the completed subprocess.
* \param[in] proc_stderr stderr from the completed subprocess.
*/
void (*subprocess_output) (pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr);
/*!
* \internal
* \brief Format version information. This is useful for the --version
* argument of command line tools.
*
* \param[in,out] out The output functions structure.
* \param[in] extended Add additional version information.
*/
void (*version) (pcmk__output_t *out, bool extended);
/*!
* \internal
     * \brief Format an informational message that should be shown to an
     *        interactive user.  Not all formatters will do this.
*
* \note A newline will automatically be added to the end of the format
* string, so callers should not include a newline.
*
* \param[in,out] out The output functions structure.
     * \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*info) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief Format an error message that should be shown to an interactive
* user. Not all formatters will do this.
*
* \note A newline will automatically be added to the end of the format
* string, so callers should not include a newline.
*
* \param[in,out] out The output functions structure.
     * \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*err) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief Format already formatted XML.
*
* \param[in,out] out The output functions structure.
* \param[in] name A name to associate with the XML.
* \param[in] buf The XML in a string.
*/
void (*output_xml) (pcmk__output_t *out, const char *name, const char *buf);
/*!
* \internal
* \brief Start a new list of items.
*
* \note For text output, this corresponds to another level of indentation. For
* XML output, this corresponds to wrapping any following output in another
* layer of tags.
*
* \note If singular_noun and plural_noun are non-NULL, calling end_list will
* result in a summary being added.
*
* \param[in,out] out The output functions structure.
* \param[in] singular_noun When outputting the summary for a list with
* one item, the noun to use.
* \param[in] plural_noun When outputting the summary for a list with
* more than one item, the noun to use.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*begin_list) (pcmk__output_t *out, const char *singular_noun,
const char *plural_noun, const char *format, ...)
G_GNUC_PRINTF(4, 5);
/*!
* \internal
* \brief Format a single item in a list.
*
* \param[in,out] out The output functions structure.
* \param[in] name A name to associate with this item.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*list_item) (pcmk__output_t *out, const char *name, const char *format, ...)
G_GNUC_PRINTF(3, 4);
/*!
* \internal
* \brief Increment the internal counter of the current list's length.
*
* Typically, this counter is maintained behind the scenes as a side effect
     * of calling list_item().  However, custom functions that maintain lists
     * in some other way will need to manage this counter manually.  This is
* useful for implementing custom message functions and should not be
* needed otherwise.
*
* \param[in,out] out The output functions structure.
*/
void (*increment_list) (pcmk__output_t *out);
/*!
* \internal
* \brief Conclude a list.
*
* \note If begin_list was called with non-NULL for both the singular_noun
* and plural_noun arguments, this function will output a summary.
* Otherwise, no summary will be added.
*
* \param[in,out] out The output functions structure.
*/
void (*end_list) (pcmk__output_t *out);
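    /* A minimal sketch of how begin_list(), list_item(), and end_list()
     * combine.  The nouns and strings are illustrative; because both nouns
     * are non-NULL, end_list() will also emit a summary in formats that
     * support one:
     *
     * \code
     * out->begin_list(out, "node", "nodes", "Node List");
     * out->list_item(out, NULL, "Online: %s", "cluster01");
     * out->list_item(out, NULL, "Online: %s", "cluster02");
     * out->end_list(out);
     * \endcode
     */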
/*!
* \internal
* \brief Should anything be printed to the user?
*
* \note This takes into account both the \p quiet value as well as the
* current formatter.
*
* \param[in] out The output functions structure.
*
     * \return true if output should be suppressed, false otherwise.
*/
bool (*is_quiet) (pcmk__output_t *out);
};
/*!
* \internal
* \brief Call a formatting function for a previously registered message.
*
* \note This function is for implementing custom formatters. It should not
* be called directly. Instead, call out->message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The message to be handled. Unknown messages
* will be ignored.
* \param[in] ... Arguments to be passed to the registered function.
*/
int
pcmk__call_message(pcmk__output_t *out, const char *message_id, ...);
/*!
* \internal
* \brief Free a ::pcmk__output_t structure that was previously created by
* pcmk__output_new().
*
* \note While the create and finish functions are designed in such a way that
* they can be called repeatedly, this function will completely free the
* memory of the object. Once this function has been called, producing
* more output requires starting over from pcmk__output_new().
*
* \param[in,out] out The output structure.
*/
void pcmk__output_free(pcmk__output_t *out);
/*!
* \internal
* \brief Create a new ::pcmk__output_t structure.
*
* \param[in,out] out The destination of the new ::pcmk__output_t.
* \param[in] fmt_name How should output be formatted?
* \param[in] filename Where should formatted output be written to? This
* can be a filename (which will be overwritten if it
* already exists), or NULL or "-" for stdout. For no
* output, pass a filename of "/dev/null".
* \param[in] argv The list of command line arguments.
*
* \return Standard Pacemaker return code
*/
int pcmk__output_new(pcmk__output_t **out, const char *fmt_name,
const char *filename, char **argv);
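/* A sketch of the usual lifecycle: create a formatter, register messages,
 * emit output, finish, and free.  The "node-count" message, fmt_functions
 * table, and argv variable are placeholders carried over from the earlier
 * sketches:
 *
 * \code
 * pcmk__output_t *out = NULL;
 * int rc = pcmk__output_new(&out, "text", NULL, argv);
 *
 * if (rc == pcmk_rc_ok) {
 *     pcmk__register_messages(out, fmt_functions);
 *     out->message(out, "node-count", 5);
 *     out->finish(out, CRM_EX_OK, true, NULL);
 *     pcmk__output_free(out);
 * }
 * \endcode
 */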
/*!
* \internal
 * \brief Register a new output formatter, making it available for use in the
 *        same way as a base formatter.
*
* \param[in,out] group A ::GOptionGroup that formatted output related command
* line arguments should be added to. This can be NULL
* for use outside of command line programs.
* \param[in] name The name of the format. This will be used to select a
* format from command line options and for displaying help.
* \param[in] create A function that creates a ::pcmk__output_t.
* \param[in] options Format-specific command line options. These will be
* added to the context. This argument can also be NULL.
*
* \return 0 on success or an error code on error.
*/
int
pcmk__register_format(GOptionGroup *group, const char *name,
pcmk__output_factory_t create, GOptionEntry *options);
/*!
* \internal
* \brief Register an entire table of output formatters at once.
*
* \param[in,out] group A ::GOptionGroup that formatted output related command
* line arguments should be added to. This can be NULL
* for use outside of command line programs.
* \param[in] table An array of ::pcmk__supported_format_t which should
* all be registered. This array must be NULL-terminated.
*
*/
void
pcmk__register_formats(GOptionGroup *group, pcmk__supported_format_t *table);
/*!
* \internal
* \brief Unregister a previously registered table of custom formatting
* functions and destroy the internal data structures associated with them.
*/
void
pcmk__unregister_formats(void);
/*!
* \internal
* \brief Register a function to handle a custom message.
*
* \note This function is for implementing custom formatters. It should not
* be called directly. Instead, call out->register_message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The message to be handled.
* \param[in] fn The custom format function to call for message_id.
*/
void
pcmk__register_message(pcmk__output_t *out, const char *message_id,
pcmk__message_fn_t fn);
/*!
* \internal
* \brief Register an entire table of custom formatting functions at once.
*
* This table can contain multiple formatting functions for the same message ID
* if they are for different format types.
*
* \param[in,out] out The output functions structure.
* \param[in] table An array of ::pcmk__message_entry_t values which should
* all be registered. This array must be NULL-terminated.
*/
void
pcmk__register_messages(pcmk__output_t *out, pcmk__message_entry_t *table);
/* Functions that are useful for implementing custom message formatters */
/*!
* \internal
* \brief A printf-like function.
*
* This function writes to out->dest and indents the text to the current level
* of the text formatter's nesting. This should be used when implementing
* custom message functions instead of printf.
*
* \param[in,out] out The output functions structure.
*/
void
pcmk__indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief A vprintf-like function.
*
* This function is like pcmk__indented_printf(), except it takes a va_list instead
* of a list of arguments. This should be used when implementing custom message
* functions instead of vprintf.
*
* \param[in,out] out The output functions structure.
* \param[in] format The format string.
* \param[in] args A list of arguments to apply to the format string.
*/
void
pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
/*!
* \internal
* \brief Create and return a new XML node with the given name, as a child of the
* current list parent. The new node is then added as the new list parent,
* meaning all subsequent nodes will be its children. This is used when
* implementing custom functions.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
*/
xmlNodePtr
pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name);
/*!
* \internal
* \brief Add the given node as a child of the current list parent. This is
* used when implementing custom message functions.
*
* \param[in,out] out The output functions structure.
* \param[in] node An XML node to be added as a child.
*/
void
pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node);
/*!
* \internal
* \brief Create and return a new XML node with the given name, as a child of the
* current list parent. This is used when implementing custom functions.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
*/
xmlNodePtr
pcmk__output_create_xml_node(pcmk__output_t *out, const char *name);
/*!
* \internal
* \brief Like pcmk__output_create_xml_node(), but add the given text content to the
* new node.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
* \param[in] content The text content of the node.
*/
xmlNodePtr
pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content);
/*!
* \internal
* \brief Push a parent XML node onto the stack. This is used when implementing
* custom message functions.
*
* The XML output formatter maintains an internal stack to keep track of which nodes
* are parents in order to build up the tree structure. This function can be used
* to temporarily push a new node onto the stack. After calling this function, any
* other formatting functions will have their nodes added as children of this new
* parent.
*
* \param[in,out] out The output functions structure.
 * \param[in] node The node to be added.
*/
void
pcmk__output_xml_push_parent(pcmk__output_t *out, xmlNodePtr node);
/*!
* \internal
 * \brief Pop a parent XML node off the stack.  This is used when implementing
 *        custom message functions.
*
 * This function removes a parent node from the stack.  See pcmk__output_xml_push_parent()
* for more details.
*
* \note Little checking is done with this function. Be sure you only pop parents
* that were previously pushed. In general, it is best to keep the code between
* push and pop simple.
*
* \param[in,out] out The output functions structure.
*/
void
pcmk__output_xml_pop_parent(pcmk__output_t *out);
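/* A sketch of the push/pop pattern in an XML message function: create a
 * wrapper node, push it so that subsequently created nodes become its
 * children, then pop it when done.  The element names are illustrative:
 *
 * \code
 * xmlNodePtr parent = pcmk__output_create_xml_node(out, "nodes");
 *
 * pcmk__output_xml_push_parent(out, parent);
 * pcmk__output_create_xml_text_node(out, "node", "cluster01");
 * pcmk__output_xml_pop_parent(out);
 * \endcode
 */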
/*!
* \internal
 * \brief Peek at the parent XML node on top of the stack.  This is used when
 *        implementing custom message functions.
*
 * This function returns the parent node on top of the stack without removing it.
 * See pcmk__output_xml_push_parent() for more details.  It has no side effects
 * and can be called on an empty stack.
*
* \note Little checking is done with this function.
*
* \param[in,out] out The output functions structure.
*
 * \return NULL if the stack is empty, otherwise the top parent on the stack.
*/
xmlNodePtr
pcmk__output_xml_peek_parent(pcmk__output_t *out);
/*!
* \internal
* \brief Create a new XML node consisting of the provided text inside an HTML
* element node of the given name.
*
* \param[in,out] out The output functions structure.
* \param[in] element_name The name of the new HTML element.
* \param[in] id The CSS ID selector to apply to this element.
* If NULL, no ID is added.
* \param[in] class_name The CSS class selector to apply to this element.
* If NULL, no class is added.
* \param[in] text The text content of the node.
*/
xmlNodePtr
pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, const char *id,
const char *class_name, const char *text);
/*!
* \internal
* \brief Add an HTML tag to the section.
*
* The arguments after name are a NULL-terminated list of keys and values,
* all of which will be added as attributes to the given tag. For instance,
* the following code would generate the tag "":
*
* \code
* pcmk__html_add_header("meta", "http-equiv", "refresh", "content", "19", NULL);
* \endcode
*
* \param[in] name The HTML tag for the new node.
* \param[in] ... A NULL-terminated key/value list of attributes.
*/
void
pcmk__html_add_header(const char *name, ...)
G_GNUC_NULL_TERMINATED;
#define PCMK__OUTPUT_SPACER_IF(out_obj, cond) \
if (cond) { \
out_obj->info(out_obj, "%s", ""); \
}
#define PCMK__OUTPUT_LIST_HEADER(out_obj, cond, retcode, title...) \
if (retcode == pcmk_rc_no_output) { \
PCMK__OUTPUT_SPACER_IF(out_obj, cond); \
retcode = pcmk_rc_ok; \
out_obj->begin_list(out_obj, NULL, NULL, title); \
}
#define PCMK__OUTPUT_LIST_FOOTER(out_obj, retcode) \
if (retcode == pcmk_rc_ok) { \
out_obj->end_list(out_obj); \
}
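/* A sketch of how the two list macros are typically paired inside a message
 * function: the header is emitted lazily before the first item, and the
 * footer closes the list only if something was printed.  The node list and
 * loop body are illustrative:
 *
 * \code
 * int rc = pcmk_rc_no_output;
 *
 * for (GListPtr iter = nodes; iter != NULL; iter = iter->next) {
 *     PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List");
 *     out->list_item(out, NULL, "%s", (const char *) iter->data);
 * }
 * PCMK__OUTPUT_LIST_FOOTER(out, rc);
 * \endcode
 */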
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 5f26d99c11..6bdd8aa1ce 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1189 +1,1192 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#define VARIANT_CLONE 1
#include "./variant.h"
void
pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
"such as %s can be used only as anonymous clones",
rsc->id, standard, rid);
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
g_list_length(data_set->nodes));
}
}
pe_resource_t *
find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
{
char *child_id = NULL;
pe_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
child_base = ID(clone_data->xml_obj_child);
child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
child = pe_find_resource(rsc->children, child_id);
free(child_id);
return child;
}
pe_resource_t *
pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
pe_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
if (clone_data->total_clones >= clone_data->clone_max) {
// If we've already used all available instances, this is an orphan
as_orphan = TRUE;
}
// Allocate instance numbers in numerical order (starting at 0)
inc_num = crm_itoa(clone_data->total_clones);
inc_max = crm_itoa(clone_data->clone_max);
child_copy = copy_xml(clone_data->xml_obj_child);
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
child_rsc = NULL;
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
CRM_ASSERT(child_rsc);
clone_data->total_clones += 1;
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
}
add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
free(inc_num);
free(inc_max);
return child_rsc;
}
gboolean
clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
const char *promoted_max = NULL;
const char *promoted_node_max = NULL;
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_MAX);
if (promoted_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_MAX);
}
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_NODEMAX);
if (promoted_node_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_NODEMAX);
}
clone_data->promoted_max = crm_parse_int(promoted_max, "1");
clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
if (max_clones) {
clone_data->clone_max = crm_parse_int(max_clones, "1");
} else if (pcmk__list_of_multiple(data_set->nodes)) {
clone_data->clone_max = g_list_length(data_set->nodes);
} else {
clone_data->clone_max = 1; /* Handy during crm_verify */
}
clone_data->ordered = crm_is_true(ordered);
if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
"because anonymous clones support only one instance "
"per node", rsc->id);
clone_data->clone_node_max = 1;
}
pe_rsc_trace(rsc, "Options for %s", rsc->id);
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
pe__rsc_bool_str(rsc, pe_rsc_unique));
pe_rsc_trace(rsc, "\tClone is promotable: %s",
pe__rsc_bool_str(rsc, pe_rsc_promotable));
// Clones may contain a single group or primitive
for (a_child = __xml_first_child_element(xml_obj); a_child != NULL;
a_child = __xml_next_element(a_child)) {
if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
clone_data->xml_obj_child = a_child;
break;
}
}
if (clone_data->xml_obj_child == NULL) {
pcmk__config_err("%s has nothing to clone", rsc->id);
return FALSE;
}
/*
* Make clones ever so slightly sticky by default
*
* This helps ensure clone instances are not shuffled around the cluster
* for no benefit in situations when pre-allocation is not appropriate
*/
if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
}
/* This ensures that the globally-unique value always exists for children to
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
pe__rsc_bool_str(rsc, pe_rsc_unique));
if (clone_data->clone_max <= 0) {
/* Create one child instance so that unpack_find_resource() will hook up
* any orphans up to the parent correctly.
*/
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
}
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
return TRUE;
}
gboolean
clone_active(pe_resource_t * rsc, gboolean all)
{
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
return TRUE;
} else if (all && child_active == FALSE) {
return FALSE;
}
}
if (all) {
return TRUE;
} else {
return FALSE;
}
}
static void
short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
{
if(suffix == NULL) {
suffix = "";
}
if (list) {
if (options & pe_print_html) {
status_print("");
}
status_print("%s%s: [%s ]%s", prefix, type, list, suffix);
if (options & pe_print_html) {
status_print("\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
}
}
static const char *
configured_role_str(pe_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
configured_role(pe_resource_t * rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
return RSC_ROLE_UNKNOWN;
}
static void
clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
const char *target_role = configured_role_str(rsc);
GListPtr gIter = rsc->children;
status_print("%sid);
status_print("multi_state=\"%s\" ",
pe__rsc_bool_str(rsc, pe_rsc_promotable));
status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
status_print("failure_ignored=\"%s\" ",
pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s\n", pre_text);
free(child_text);
}
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
{
GListPtr gIter;
bool all = !any;
if (pcmk_is_set(rsc->flags, flag)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
if(is_set_recursive(gIter->data, flag, any)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
}
if(all) {
return TRUE;
}
return FALSE;
}
void
clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *list_text = NULL;
char *child_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
clone_print_xml(rsc, pre_text, options, print_data);
return;
}
get_clone_variant_data(clone_data, rsc);
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (pcmk_is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
&& !pcmk_is_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
if (options & pe_print_html) {
status_print("- \n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
}
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
short_print(list_text, child_text, "Masters", NULL, options, print_data);
g_list_free(master_list);
free(list_text);
list_text = NULL;
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
} else {
short_print(list_text, child_text, "Slaves", NULL, options, print_data);
}
} else {
short_print(list_text, child_text, "Started", NULL, options, print_data);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
if (!pcmk_is_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list); stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
short_print(stopped_list, child_text, state, NULL, options, print_data);
free(stopped_list);
}
if (options & pe_print_html) {
status_print("
\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
int rc = pcmk_rc_no_output;
gboolean printed_header = FALSE;
gboolean print_everything = TRUE;
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (!printed_header) {
printed_header = TRUE;
- rc = pe__name_and_nvpairs_xml(out, true, "clone", 7,
+ rc = pe__name_and_nvpairs_xml(out, true, "clone", 8,
"id", rsc->id,
"multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
"unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
"managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "disabled", pe__resource_is_disabled(rsc) ? "true" : "false",
"failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
"failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
"target_role", configured_role_str(rsc));
CRM_ASSERT(rc == pcmk_rc_ok);
}
out->message(out, crm_map_element_name(child_rsc->xml), options,
child_rsc, only_node, only_rsc);
}
if (printed_header) {
pcmk__output_xml_pop_parent(out);
}
return rc;
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
get_clone_variant_data(clone_data, rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
- out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
+ out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
rc = pcmk_rc_ok;
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (pcmk_is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
&& !pcmk_is_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
GListPtr all = NULL;
/* Print every resource that's a child of this clone. */
all = g_list_prepend(all, strdup("*"));
out->message(out, crm_map_element_name(child_rsc->xml), options,
child_rsc, only_node, all);
g_list_free_full(all, free);
}
}
if (pcmk_is_set(options, pe_print_clone_details)) {
free(stopped_list);
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, NULL, " Masters: [%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, NULL, " Slaves (target-role): [%s ]", list_text);
} else {
out->list_item(out, NULL, " Slaves: [%s ]", list_text);
}
} else {
out->list_item(out, NULL, " Started: [%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (!pcmk_is_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(only_node, node->details->uname)) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, NULL, " %s: [%s ]", state, stopped_list);
free(stopped_list);
}
}
out->end_list(out);
return rc;
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
get_clone_variant_data(clone_data, rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
- out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
+ out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
rc = pcmk_rc_ok;
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (pcmk_is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
&& !pcmk_is_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
GListPtr all = NULL;
/* Print every resource that's a child of this clone. */
all = g_list_prepend(all, strdup("*"));
out->message(out, crm_map_element_name(child_rsc->xml), options,
child_rsc, only_node, all);
g_list_free_full(all, free);
}
}
if (pcmk_is_set(options, pe_print_clone_details)) {
free(stopped_list);
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, "Masters", "[%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, "Slaves (target-role)", "[%s ]", list_text);
} else {
out->list_item(out, "Slaves", "[%s ]", list_text);
}
} else {
out->list_item(out, "Started", "[%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (!pcmk_is_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(only_node, node->details->uname)) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, state, "[%s ]", stopped_list);
free(stopped_list);
}
}
out->end_list(out);
return rc;
}
void
clone_free(pe_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
free_xml(child_rsc->xml);
child_rsc->xml = NULL;
/* There could be a saved unexpanded xml */
free_xml(child_rsc->orig_xml);
child_rsc->orig_xml = NULL;
child_rsc->fns->free(child_rsc);
}
g_list_free(rsc->children);
if (clone_data) {
CRM_ASSERT(clone_data->demote_notify == NULL);
CRM_ASSERT(clone_data->stop_notify == NULL);
CRM_ASSERT(clone_data->start_notify == NULL);
CRM_ASSERT(clone_data->promote_notify == NULL);
}
common_free(rsc);
}
enum rsc_role_e
clone_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
clone_role = a_role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
return clone_role;
}
/*!
* \internal
* \brief Check whether a clone has an instance for every node
*
* \param[in] rsc Clone to check
* \param[in] data_set Cluster state
*/
bool
pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->clone_max == g_list_length(data_set->nodes)) {
return TRUE;
}
}
return FALSE;
}
gboolean
pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
{
gboolean passes = FALSE;
clone_variant_data_t *clone_data = NULL;
if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
passes = TRUE;
} else {
get_clone_variant_data(clone_data, rsc);
passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child));
if (!passes) {
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
}
}
}
}
return !passes;
}
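/*
 * Illustrative sketch, not part of the patch: the clone output above now adds
 * a " (disabled)" suffix and a disabled="true|false" XML attribute based on
 * pe__resource_is_disabled(). That helper is defined elsewhere in this
 * changeset; the toy below only models the assumed semantics -- a resource is
 * treated as disabled when its configured target-role pins it to Stopped.
 */
#include <stdbool.h>
#include <stddef.h>
#include <strings.h>

/* Hypothetical stand-in; the real helper inspects the resource's meta-attributes. */
static bool
toy_resource_is_disabled(const char *target_role)
{
    return (target_role != NULL) && (strcasecmp(target_role, "Stopped") == 0);
}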
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index 3386139d35..6464123c4a 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,392 +1,404 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#define VARIANT_GROUP 1
#include "./variant.h"
gboolean
group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
group_variant_data_t *group_data = NULL;
const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
const char *clone_id = NULL;
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
group_data = calloc(1, sizeof(group_variant_data_t));
group_data->num_children = 0;
group_data->first_child = NULL;
group_data->last_child = NULL;
rsc->variant_opaque = group_data;
// We don't actually need the null checks but it speeds up the common case
if ((group_ordered == NULL)
|| (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
group_data->ordered = TRUE;
}
if ((group_colocated == NULL)
|| (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
group_data->colocated = TRUE;
}
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
for (xml_native_rsc = __xml_first_child_element(xml_obj); xml_native_rsc != NULL;
xml_native_rsc = __xml_next_element(xml_native_rsc)) {
if (pcmk__str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
pe_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
continue;
}
group_data->num_children++;
rsc->children = g_list_append(rsc->children, new_rsc);
if (group_data->first_child == NULL) {
group_data->first_child = new_rsc;
}
group_data->last_child = new_rsc;
pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
}
}
if (group_data->num_children == 0) {
pcmk__config_warn("Group %s does not have any children", rsc->id);
return TRUE; // Allow empty groups, children can be added later
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
return TRUE;
}
gboolean
group_active(pe_resource_t * rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
} else {
c_all = FALSE;
}
}
if (c_any == FALSE) {
return FALSE;
} else if (all && c_all == FALSE) {
return FALSE;
}
return TRUE;
}
static void
group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
GListPtr gIter = rsc->children;
char *child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sid);
status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s\n", pre_text);
free(child_text);
}
void
group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = NULL;
GListPtr gIter = rsc->children;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
group_print_xml(rsc, pre_text, options, print_data);
return;
}
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
if (options & pe_print_html) {
status_print("\n\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
if (options & pe_print_brief) {
print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
} else {
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("- \n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
}
}
if (options & pe_print_html) {
status_print("
\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
char *count = crm_itoa(g_list_length(gIter));
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
free(count);
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (rc == pcmk_rc_no_output) {
- rc = pe__name_and_nvpairs_xml(out, true, "group", 2
+ rc = pe__name_and_nvpairs_xml(out, true, "group", 4
, "id", rsc->id
- , "number_resources", count);
+ , "number_resources", count
+ , "managed", pe__rsc_bool_str(rsc, pe_rsc_managed)
+ , "disabled", pe__resource_is_disabled(rsc) ? "true" : "false");
free(count);
CRM_ASSERT(rc == pcmk_rc_ok);
}
out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc,
only_node, only_rsc);
}
if (rc == pcmk_rc_ok) {
pcmk__output_xml_pop_parent(out);
}
return rc;
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
if (options & pe_print_brief) {
GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
if (rscs != NULL) {
- out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id,
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
+
pe__rscs_brief_output(out, rscs, options, TRUE);
rc = pcmk_rc_ok;
g_list_free(rscs);
}
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id,
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
out->message(out, crm_map_element_name(child_rsc->xml), options,
child_rsc, only_node, only_rsc);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
if (options & pe_print_brief) {
GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
if (rscs != NULL) {
- out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id,
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
+
pe__rscs_brief_output(out, rscs, options, TRUE);
rc = pcmk_rc_ok;
g_list_free(rscs);
}
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id,
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
+ pe__resource_is_disabled(rsc) ? " (disabled)" : "");
out->message(out, crm_map_element_name(child_rsc->xml), options,
child_rsc, only_node, only_rsc);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
void
group_free(pe_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
child_rsc->fns->free(child_rsc);
}
pe_rsc_trace(rsc, "Freeing child list");
g_list_free(rsc->children);
common_free(rsc);
}
enum rsc_role_e
group_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
group_role = role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
return group_role;
}
gboolean
pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
{
gboolean passes = FALSE;
if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
passes = TRUE;
} else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
passes = TRUE;
} else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)) {
passes = TRUE;
} else {
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
}
}
}
return !passes;
}
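/*
 * Illustrative sketch, not part of the patch: the group headers patched above
 * are built by appending optional suffixes, so an unmanaged, disabled group
 * would render as "Resource Group: exim-group (unmanaged) (disabled)". Toy
 * reproduction of that composition:
 */
#include <stdbool.h>
#include <stdio.h>

static void
toy_print_group_header(const char *id, bool managed, bool disabled)
{
    printf("Resource Group: %s%s%s\n", id,
           managed ? "" : " (unmanaged)",
           disabled ? " (disabled)" : "");
}

/* toy_print_group_header("exim-group", false, true)
 * -> "Resource Group: exim-group (unmanaged) (disabled)" */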
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index fe4356b493..9094034c1c 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,1906 +1,1923 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
static char *
failed_action_string(xmlNodePtr xml_op) {
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
time_t last_change = 0;
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change) == pcmk_ok) {
crm_time_t *crm_when = crm_time_new(NULL);
char *time_s = NULL;
char *buf = NULL;
crm_time_set_timet(crm_when, &last_change);
time_s = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
buf = crm_strdup_printf("%s on %s '%s' (%d): call=%s, status='%s', "
"exitreason='%s', " XML_RSC_OP_LAST_CHANGE
"='%s', queued=%sms, exec=%sms",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none",
time_s,
crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
crm_time_free(crm_when);
free(time_s);
return buf;
} else {
return crm_strdup_printf("%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none");
}
}
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
data_set->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
static char *
last_changed_string(const char *last_written, const char *user,
const char *client, const char *origin) {
if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
return crm_strdup_printf("%s%s%s%s%s%s%s",
last_written ? last_written : "",
user ? " by " : "",
user ? user : "",
client ? " via " : "",
client ? client : "",
origin ? " on " : "",
origin ? origin : "");
} else {
return strdup("");
}
}
static char *
op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
int rc, gboolean print_timing) {
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
char *interval_str = NULL;
char *buf = NULL;
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *pair = pcmk_format_nvpair("interval", interval_ms_s, "ms");
interval_str = crm_strdup_printf(" %s", pair);
free(pair);
}
if (print_timing) {
char *last_change_str = NULL;
char *last_run_str = NULL;
char *exec_str = NULL;
char *queue_str = NULL;
const char *value = NULL;
time_t epoch = 0;
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_CHANGE, epoch);
last_change_str = crm_strdup_printf(" %s", time);
free(time);
}
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_RUN, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_RUN, epoch);
last_run_str = crm_strdup_printf(" %s", time);
free(time);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
exec_str = crm_strdup_printf(" %s", pair);
free(pair);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
queue_str = crm_strdup_printf(" %s", pair);
free(pair);
}
buf = crm_strdup_printf("(%s) %s:%s%s%s%s%s rc=%d (%s)", call, task,
interval_str ? interval_str : "",
last_change_str ? last_change_str : "",
last_run_str ? last_run_str : "",
exec_str ? exec_str : "",
queue_str ? queue_str : "",
rc, services_ocf_exitcode_str(rc));
if (last_change_str) {
free(last_change_str);
}
if (last_run_str) {
free(last_run_str);
}
if (exec_str) {
free(exec_str);
}
if (queue_str) {
free(queue_str);
}
} else {
buf = crm_strdup_printf("(%s) %s%s%s", call, task,
interval_str ? ":" : "",
interval_str ? interval_str : "");
}
if (interval_str) {
free(interval_str);
}
return buf;
}
static char *
resource_history_string(pe_resource_t *rsc, const char *rsc_id, gboolean all,
int failcount, time_t last_failure) {
char *buf = NULL;
if (rsc == NULL) {
buf = crm_strdup_printf("%s: orphan", rsc_id);
} else if (all || failcount || last_failure > 0) {
char *failcount_s = NULL;
char *lastfail_s = NULL;
if (failcount > 0) {
failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
failcount);
} else {
failcount_s = strdup("");
}
if (last_failure > 0) {
lastfail_s = crm_strdup_printf(" %s='%s'",
PCMK__LAST_FAILURE_PREFIX,
pcmk__epoch2str(&last_failure));
}
buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
rsc_id, rsc->migration_threshold, failcount_s,
lastfail_s? lastfail_s : "");
free(failcount_s);
free(lastfail_s);
} else {
buf = crm_strdup_printf("%s:", rsc_id);
}
return buf;
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
/* Always print DC if none, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- out->message(out, "maint-mode");
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
return rc;
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
/* Always print DC if none, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
/* Kind of a hack - close the list we may have opened earlier in this
* function so we can put all the options into their own list. We
* only want to do this on HTML output, though.
*/
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- out->message(out, "maint-mode");
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
return rc;
}
char *
pe__node_display_name(pe_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
const char *node_id = NULL;
int name_len;
CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
/* Host is displayed only if this is a guest node */
if (pe__is_guest_node(node)) {
pe_node_t *host_node = pe__current_node(node->details->remote_rsc);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
}
if (node_host == NULL) {
node_host = ""; /* so we at least get "uname@" to indicate guest */
}
}
/* Node ID is displayed if different from uname and detail is requested */
if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
node_id = node->details->id;
}
/* Determine name length */
name_len = strlen(node->details->uname) + 1;
if (node_host) {
name_len += strlen(node_host) + 1; /* "@node_host" */
}
if (node_id) {
name_len += strlen(node_id) + 3; /* + " (node_id)" */
}
/* Allocate and populate display name */
node_name = malloc(name_len);
CRM_ASSERT(node_name != NULL);
strcpy(node_name, node->details->uname);
if (node_host) {
strcat(node_name, "@");
strcat(node_name, node_host);
}
if (node_id) {
strcat(node_name, " (");
strcat(node_name, node_id);
strcat(node_name, ")");
}
return node_name;
}
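/*
 * Illustrative sketch, not part of the patch: pe__node_display_name() above
 * yields "uname" for plain nodes, "uname@host" for guest nodes, and appends
 * " (id)" when detail is requested and the node ID differs from the uname.
 * Toy reproduction of the same composition with snprintf:
 */
#include <stdio.h>

static void
toy_display_name(char *buf, size_t len, const char *uname,
                 const char *guest_host, const char *node_id)
{
    snprintf(buf, len, "%s%s%s%s%s%s", uname,
             guest_host ? "@" : "", guest_host ? guest_host : "",
             node_id ? " (" : "", node_id ? node_id : "", node_id ? ")" : "");
}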
int
pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...)
{
xmlNodePtr xml_node = NULL;
va_list args;
CRM_ASSERT(tag_name != NULL);
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
xml_node = is_list
? create_xml_node(xml_node, tag_name)
: xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
va_start(args, pairs_count);
while(pairs_count--) {
const char *param_name = va_arg(args, const char *);
const char *param_value = va_arg(args, const char *);
if (param_name && param_value) {
xmlSetProp(xml_node, (pcmkXmlStr)param_name, (pcmkXmlStr)param_value);
}
};
va_end(args);
if (is_list) {
pcmk__output_xml_push_parent(out, xml_node);
}
return pcmk_rc_ok;
}
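/*
 * Illustrative sketch, not part of the patch: pairs_count above tells
 * pe__name_and_nvpairs_xml() how many name/value string pairs follow in the
 * varargs, which is why the clone and group callers in this changeset bump
 * the count (7 -> 8 and 2 -> 4) when they add attributes. Pairs with a NULL
 * name or value are not emitted but still consume a slot. Standalone toy
 * model of that contract:
 */
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

static void
toy_emit_attrs(size_t pairs_count, ...)
{
    va_list args;

    va_start(args, pairs_count);
    while (pairs_count--) {
        const char *name = va_arg(args, const char *);
        const char *value = va_arg(args, const char *);

        if (name && value) {
            printf(" %s=\"%s\"", name, value);
        }
    }
    va_end(args);
    printf("\n");
}

/* toy_emit_attrs(4, "id", "exim-group", "number_resources", "2",
 *                "managed", "true", "disabled", "false"); */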
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_html(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
free(node_name);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_text(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "ban");
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
char *weight_s = crm_itoa(pe_node->weight);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) location->id);
xmlSetProp(node, (pcmkXmlStr) "resource", (pcmkXmlStr) location->rsc_lh->id);
xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) pe_node->details->uname);
xmlSetProp(node, (pcmkXmlStr) "weight", (pcmkXmlStr) weight_s);
xmlSetProp(node, (pcmkXmlStr) "master_only",
(pcmkXmlStr) pcmk__btoa(location->role_filter == RSC_ROLE_MASTER));
free(weight_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_html(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *nnodes_str = crm_strdup_printf("%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
free(nnodes_str);
if (ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
s = crm_strdup_printf(", %d ", nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else if (ndisabled && !nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
} else if (!ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else {
char *s = crm_strdup_printf("%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_text(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
out->list_item(out, NULL, "%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
if (ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED, %d BLOCKED from "
"further action due to failure)",
nresources, pcmk__plural_s(nresources), ndisabled,
nblocked);
} else if (ndisabled && !nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED)",
nresources, pcmk__plural_s(nresources), ndisabled);
} else if (!ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d BLOCKED from further action "
"due to failure)",
nresources, pcmk__plural_s(nresources), nblocked);
} else {
out->list_item(out, NULL, "%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *s = crm_itoa(nnodes);
xmlSetProp(nodes_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nresources);
xmlSetProp(resources_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(ndisabled);
xmlSetProp(resources_node, (pcmkXmlStr) "disabled", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nblocked);
xmlSetProp(resources_node, (pcmkXmlStr) "blocked", (pcmkXmlStr) s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
if (dc) {
if (crm_is_true(quorum)) {
char *buf = crm_strdup_printf("%s (version %s) - partition with quorum",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
} else {
char *buf = crm_strdup_printf("%s (version %s) - partition",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
pcmk_create_html_node(node, "span", NULL, "warning", "WITHOUT");
pcmk_create_html_node(node, "span", NULL, NULL, "quorum");
}
} else {
pcmk_create_html_node(node ,"span", NULL, "warning", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_text(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
if (dc) {
out->list_item(out, "Current DC", "%s (version %s) - partition %s quorum",
dc_name, dc_version_s ? dc_version_s : "unknown",
crm_is_true(quorum) ? "with" : "WITHOUT");
} else {
out->list_item(out, "Current DC", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "current_dc");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
if (dc) {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "true");
xmlSetProp(node, (pcmkXmlStr) "version", (pcmkXmlStr) (dc_version_s ? dc_version_s : ""));
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) dc->details->uname);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) dc->details->id);
xmlSetProp(node, (pcmkXmlStr) "with_quorum",
(pcmkXmlStr) pcmk__btoa(crm_is_true(quorum)));
} else {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "false");
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("maint-mode")
+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long")
int
pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
- fprintf(out->dest, "\n *** Resource management is DISABLED ***");
- fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services");
- fprintf(out->dest, "\n");
- return pcmk_rc_ok;
+ unsigned long long flags = va_arg(args, unsigned long long);
+
+ if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ fprintf(out->dest, "\n *** Resource management is DISABLED ***");
+ fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services");
+ fprintf(out->dest, "\n");
+ return pcmk_rc_ok;
+ } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ fprintf(out->dest, "\n *** Resource management is DISABLED ***");
+ fprintf(out->dest, "\n The cluster will keep all resources stopped");
+ fprintf(out->dest, "\n");
+ return pcmk_rc_ok;
+ } else {
+ return pcmk_rc_no_output;
+ }
}
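/*
 * Illustrative sketch, not part of the patch: after this change the callers
 * in pe__cluster_summary() pass data_set->flags and the "maint-mode" message
 * decides for itself whether to print, returning no-output when neither
 * maintenance-mode nor stop-all-resources is set. Toy model of that decision,
 * using hypothetical stand-in flag bits instead of the pe_flag_* values:
 */
#include <stdio.h>

#define TOY_FLAG_MAINTENANCE  (1ULL << 0)
#define TOY_FLAG_STOP_ALL     (1ULL << 1)

static int
toy_maint_mode_message(unsigned long long flags)
{
    if (flags & TOY_FLAG_MAINTENANCE) {
        puts(" *** Resource management is DISABLED ***");
        puts(" The cluster will not attempt to start, stop or recover services");
        return 0;                       /* printed something */
    } else if (flags & TOY_FLAG_STOP_ALL) {
        puts(" *** Resource management is DISABLED ***");
        puts(" The cluster will keep all resources stopped");
        return 0;
    }
    return 1;                           /* nothing to print */
}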
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
+
+ pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
+ pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
+ pcmk_create_html_node(node, "span", NULL, NULL,
+ " (the cluster will keep all resources stopped)");
} else {
out->list_item(out, NULL, "Resource management: enabled");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_log(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
return pcmk_rc_ok;
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
+ return pcmk_rc_ok;
} else {
return pcmk_rc_no_output;
}
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_text(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "cluster_options");
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
xmlSetProp(node, (pcmkXmlStr) "stonith-enabled",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)));
xmlSetProp(node, (pcmkXmlStr) "symmetric-cluster",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)));
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "freeze");
break;
case no_quorum_stop:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop");
break;
case no_quorum_demote:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote");
break;
case no_quorum_ignore:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore");
break;
case no_quorum_suicide:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "suicide");
break;
}
xmlSetProp(node, (pcmkXmlStr) "maintenance-mode",
(pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+ xmlSetProp(node, (pcmkXmlStr) "stop-all-resources",
+ (pcmkXmlStr) pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
const char *stack_s = va_arg(args, const char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_text(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
out->list_item(out, "Stack", "%s", stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "stack");
const char *stack_s = va_arg(args, const char *);
xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_html(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
pcmk_create_html_node(updated_node, "span", NULL, NULL,
pcmk__epoch2str(NULL));
pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
pcmk_create_html_node(changed_node, "span", NULL, NULL, buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "last_update");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "last_change");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
xmlSetProp(updated_node, (pcmkXmlStr) "time",
(pcmkXmlStr) pcmk__epoch2str(NULL));
xmlSetProp(changed_node, (pcmkXmlStr) "time", (pcmkXmlStr) (last_written ? last_written : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "user", (pcmkXmlStr) (user ? user : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "client", (pcmkXmlStr) (client ? client : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "origin", (pcmkXmlStr) (origin ? origin : ""));
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_text(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
out->list_item(out, "Last updated", "%s", pcmk__epoch2str(NULL));
out->list_item(out, "Last change", " %s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_text(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
char *s = failed_action_string(xml_op);
out->list_item(out, NULL, "%s", s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
char *rc_s = crm_itoa(rc);
char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
xmlNodePtr node = pcmk__output_create_xml_node(out, "failure");
xmlSetProp(node, (pcmkXmlStr) (op_key ? "op_key" : "id"),
(pcmkXmlStr) (op_key ? op_key : "id"));
xmlSetProp(node, (pcmkXmlStr) "node",
(pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME));
xmlSetProp(node, (pcmkXmlStr) "exitstatus",
(pcmkXmlStr) services_ocf_exitcode_str(rc));
xmlSetProp(node, (pcmkXmlStr) "exitreason", (pcmkXmlStr) reason_s);
xmlSetProp(node, (pcmkXmlStr) "exitcode", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "status",
(pcmkXmlStr) services_lrm_status_str(status));
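/* Add timing details only when the failed operation records a last-rc-change timestamp */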
if (last) {
guint interval_ms = 0;
char *s = NULL;
time_t when = crm_parse_int(last, "0");
crm_time_t *crm_when = crm_time_new(NULL);
char *rc_change = NULL;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
s = crm_itoa(interval_ms);
crm_time_set_timet(crm_when, &when);
rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, (pcmkXmlStr) rc_change);
xmlSetProp(node, (pcmkXmlStr) "queued",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_QUEUE));
xmlSetProp(node, (pcmkXmlStr) "exec",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
xmlSetProp(node, (pcmkXmlStr) "task",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_TASK));
free(s);
free(rc_change);
crm_time_free(crm_when);
}
free(reason_s);
free(rc_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_html(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = crm_strdup_printf("Node: %s", node_name);
if (full) {
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, buf);
if (node->details->standby_onfail && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "standby", " standby (on-fail)");
} else if (node->details->standby && node->details->online) {
char *s = crm_strdup_printf(" standby%s", node->details->running_rsc ? " (with active resources)" : "");
pcmk_create_html_node(item_node, "span", NULL, " standby", s);
free(s);
} else if (node->details->standby) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (standby)");
} else if (node->details->maintenance && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "maint", " maintenance");
} else if (node->details->maintenance) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (maintenance)");
} else if (node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "online", " online");
} else {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE");
}
if (print_brief && group_by_node) {
GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
if (rscs != NULL) {
out->begin_list(out, NULL, NULL, NULL);
pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
out->end_list(out);
}
} else if (group_by_node) {
GListPtr lpc2 = NULL;
out->begin_list(out, NULL, NULL, NULL);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
out->end_list(out);
}
} else {
out->begin_list(out, NULL, NULL, "%s", buf);
}
free(buf);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_text(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = NULL;
/* Print the node name and status */
if (pe__is_guest_node(node)) {
buf = crm_strdup_printf("GuestNode %s: %s", node_name, node_mode);
} else if (pe__is_remote_node(node)) {
buf = crm_strdup_printf("RemoteNode %s: %s", node_name, node_mode);
} else {
buf = crm_strdup_printf("Node %s: %s", node_name, node_mode);
}
/* If we're grouping by node, print its resources */
if (group_by_node) {
if (print_brief) {
GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
if (rscs != NULL) {
out->begin_list(out, NULL, NULL, "%s", buf);
out->begin_list(out, NULL, NULL, "Resources");
pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
out->end_list(out);
out->end_list(out);
}
} else {
GListPtr gIter2 = NULL;
out->begin_list(out, NULL, NULL, "%s", buf);
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
out->end_list(out);
out->end_list(out);
}
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
free(node_name);
} else {
out->begin_list(out, NULL, NULL, "Node: %s", pe__node_display_name(node, print_clone_detail));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_xml(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean print_brief G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
const char *node_type = "unknown";
char *length_s = crm_itoa(g_list_length(node->details->running_rsc));
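/* Translate the internal node type to the string reported in XML output */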
switch (node->details->type) {
case node_member:
node_type = "member";
break;
case node_remote:
node_type = "remote";
break;
case node_ping:
node_type = "ping";
break;
}
pe__name_and_nvpairs_xml(out, true, "node", 13,
"name", node->details->uname,
"id", node->details->id,
"online", pcmk__btoa(node->details->online),
"standby", pcmk__btoa(node->details->standby),
"standby_onfail", pcmk__btoa(node->details->standby_onfail),
"maintenance", pcmk__btoa(node->details->maintenance),
"pending", pcmk__btoa(node->details->pending),
"unclean", pcmk__btoa(node->details->unclean),
"shutdown", pcmk__btoa(node->details->shutdown),
"expected_up", pcmk__btoa(node->details->expected_up),
"is_dc", pcmk__btoa(node->details->is_dc),
"resources_running", length_s,
"type", node_type);
if (pe__is_guest_node(node)) {
xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
xmlSetProp(xml_node, (pcmkXmlStr) "id_as_resource",
(pcmkXmlStr) node->details->remote_rsc->container->id);
}
if (group_by_node) {
GListPtr lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc->data;
out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
rsc, only_node, only_rsc);
}
}
free(length_s);
out->end_list(out);
} else {
xmlNodePtr parent = pcmk__output_xml_create_parent(out, "node");
xmlSetProp(parent, (pcmkXmlStr) "name", (pcmkXmlStr) node->details->uname);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_text(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
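/* add_extra marks a connectivity attribute whose value should be compared against the expected score */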
if (add_extra) {
int v = crm_parse_int(value, "0");
if (v <= 0) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
} else if (v < expected_score) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_html(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
if (add_extra) {
int v = crm_parse_int(value, "0");
char *s = crm_strdup_printf("%s: %s", name, value);
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, s);
free(s);
if (v <= 0) {
pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
} else if (v < expected_score) {
char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
free(buf);
}
} else {
out->list_item(out, NULL, "%s: %s", name, value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_xml(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute");
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) name);
xmlSetProp(node, (pcmkXmlStr) "value", (pcmkXmlStr) value);
if (add_extra) {
char *buf = crm_itoa(expected_score);
xmlSetProp(node, (pcmkXmlStr) "expected", (pcmkXmlStr) buf);
free(buf);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_html(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_text(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
/* space-separated lists of node names */
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_guest_nodes = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
const char *node_mode = NULL;
char *node_name = pe__node_display_name(node, print_clone_detail);
if (!pcmk__str_in_list(only_node, node->details->uname)) {
free(node_name);
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
/* Get node mode */
if (node->details->unclean) {
if (node->details->online) {
node_mode = "UNCLEAN (online)";
} else if (node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if (node->details->pending) {
node_mode = "pending";
} else if (node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
if (node->details->running_rsc) {
node_mode = "standby (with active resources)";
} else {
node_mode = "standby";
}
} else {
node_mode = "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
node_mode = "maintenance";
} else {
node_mode = "OFFLINE (maintenance)";
}
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
if (pe__is_guest_node(node)) {
online_guest_nodes = pcmk__add_word(online_guest_nodes,
node_name);
} else if (pe__is_remote_node(node)) {
online_remote_nodes = pcmk__add_word(online_remote_nodes,
node_name);
} else {
online_nodes = pcmk__add_word(online_nodes, node_name);
}
free(node_name);
continue;
}
} else {
node_mode = "OFFLINE";
if (group_by_node == FALSE) {
if (pe__is_remote_node(node)) {
offline_remote_nodes = pcmk__add_word(offline_remote_nodes,
node_name);
} else if (pe__is_guest_node(node)) {
/* ignore offline guest nodes */
} else {
offline_nodes = pcmk__add_word(offline_nodes, node_name);
}
free(node_name);
continue;
}
}
/* If we get here, node is in bad state, or we're grouping by node */
out->message(out, "node", node, print_opts, TRUE, node_mode, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
free(node_name);
}
/* If we're not grouping by node, summarize nodes by status */
if (online_nodes) {
out->list_item(out, "Online", "[%s ]", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
out->list_item(out, "OFFLINE", "[%s ]", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
out->list_item(out, "RemoteOnline", "[%s ]", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
out->list_item(out, "RemoteOFFLINE", "[%s ]", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_guest_nodes) {
out->list_item(out, "GuestOnline", "[%s ]", online_guest_nodes);
free(online_guest_nodes);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_xml(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
out->begin_list(out, NULL, NULL, "nodes");
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
print_brief, group_by_node, only_node, only_rsc);
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_text(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *buf = op_history_string(xml_op, task, interval_ms_s, rc, print_timing);
out->list_item(out, NULL, "%s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_xml(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *rc_s = NULL;
xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history");
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "task", (pcmkXmlStr) task);
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *s = crm_strdup_printf("%sms", interval_ms_s);
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
free(s);
}
if (print_timing) {
const char *value = NULL;
value = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_LAST_RUN);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_RUN,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, (pcmkXmlStr) s);
free(s);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_QUEUE, (pcmkXmlStr) s);
free(s);
}
}
rc_s = crm_itoa(rc);
xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "rc_text", (pcmkXmlStr) services_ocf_exitcode_str(rc));
free(rc_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_text(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, int);
gboolean as_header = va_arg(args, gboolean);
char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
if (as_header) {
out->begin_list(out, NULL, NULL, "%s", buf);
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, int);
gboolean as_header = va_arg(args, gboolean);
xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc_id);
if (rsc == NULL) {
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "true");
} else if (all || failcount || last_failure > 0) {
char *migration_s = crm_itoa(rsc->migration_threshold);
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "false");
xmlSetProp(node, (pcmkXmlStr) "migration-threshold",
(pcmkXmlStr) migration_s);
free(migration_s);
if (failcount > 0) {
char *s = crm_itoa(failcount);
xmlSetProp(node, (pcmkXmlStr) PCMK__FAIL_COUNT_PREFIX,
(pcmkXmlStr) s);
free(s);
}
if (last_failure > 0) {
xmlSetProp(node, (pcmkXmlStr) PCMK__LAST_FAILURE_PREFIX,
(pcmkXmlStr) pcmk__epoch2str(&last_failure));
}
}
if (as_header == FALSE) {
pcmk__output_xml_pop_parent(out);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "unsigned int", "gboolean",
"gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr", "gboolean")
int
pe__resource_list(pcmk__output_t *out, va_list args)
{
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean group_by_node = va_arg(args, gboolean);
gboolean inactive_resources = va_arg(args, gboolean);
gboolean brief_output = va_arg(args, gboolean);
gboolean print_summary = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
GListPtr only_rsc = va_arg(args, GListPtr);
gboolean print_spacer = va_arg(args, gboolean);
GListPtr rsc_iter;
int rc = pcmk_rc_no_output;
/* If we already showed active resources by node, and
* we're not showing inactive resources, we have nothing to do
*/
if (group_by_node && !inactive_resources) {
return rc;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
if (group_by_node) {
/* Active resources have already been printed by node */
out->begin_list(out, NULL, NULL, "Inactive Resources");
} else if (inactive_resources) {
out->begin_list(out, NULL, NULL, "Full List of Resources");
} else {
out->begin_list(out, NULL, NULL, "Active Resources");
}
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
if (brief_output && !group_by_node) {
GListPtr rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
pe__rscs_brief_output(out, rscs, print_opts, inactive_resources);
g_list_free(rscs);
}
/* For each resource, display it if appropriate */
for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
} else if (group_by_node) {
if (is_active) {
continue;
}
/* Skip primitives already counted in a brief summary */
} else if (brief_output && (rsc->variant == pe_native)) {
continue;
/* Skip resources that aren't at least partially active,
* unless we're displaying inactive resources
*/
} else if (!partially_active && !inactive_resources) {
continue;
} else if (partially_active && !pe__rsc_running_on_any_node_in_list(rsc, only_node)) {
continue;
}
/* Print this resource */
x = out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc,
only_node, only_rsc);
if (x == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
if (print_summary && rc != pcmk_rc_ok) {
if (group_by_node) {
out->list_item(out, NULL, "No inactive resources");
} else if (inactive_resources) {
out->list_item(out, NULL, "No resources");
} else {
out->list_item(out, NULL, "No active resources");
}
}
out->end_list(out);
return rc;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_html(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, NULL, "%s:\t%s%s %s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_text(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, ticket->id, "\t%s%s %s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, ticket->id, "\t%s%s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = NULL;
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
node = pcmk__output_create_xml_node(out, "ticket");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) ticket->id);
xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) (ticket->granted ? "granted" : "revoked"));
xmlSetProp(node, (pcmkXmlStr) "standby",
(pcmkXmlStr) pcmk__btoa(ticket->standby));
if (ticket->last_granted > -1) {
xmlSetProp(node, (pcmkXmlStr) "last-granted",
(pcmkXmlStr) pcmk__epoch2str(&ticket->last_granted));
}
return pcmk_rc_ok;
}
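/* Table mapping each (message name, format) pair to its formatting function */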
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "html", pe__ban_html },
{ "ban", "log", pe__ban_text },
{ "ban", "text", pe__ban_text },
{ "ban", "xml", pe__ban_xml },
{ "bundle", "xml", pe__bundle_xml },
{ "bundle", "html", pe__bundle_html },
{ "bundle", "text", pe__bundle_text },
{ "bundle", "log", pe__bundle_text },
{ "clone", "xml", pe__clone_xml },
{ "clone", "html", pe__clone_html },
{ "clone", "text", pe__clone_text },
{ "clone", "log", pe__clone_text },
{ "cluster-counts", "html", pe__cluster_counts_html },
{ "cluster-counts", "log", pe__cluster_counts_text },
{ "cluster-counts", "text", pe__cluster_counts_text },
{ "cluster-counts", "xml", pe__cluster_counts_xml },
{ "cluster-dc", "html", pe__cluster_dc_html },
{ "cluster-dc", "log", pe__cluster_dc_text },
{ "cluster-dc", "text", pe__cluster_dc_text },
{ "cluster-dc", "xml", pe__cluster_dc_xml },
{ "cluster-options", "html", pe__cluster_options_html },
{ "cluster-options", "log", pe__cluster_options_log },
{ "cluster-options", "text", pe__cluster_options_text },
{ "cluster-options", "xml", pe__cluster_options_xml },
{ "cluster-summary", "default", pe__cluster_summary },
{ "cluster-summary", "html", pe__cluster_summary_html },
{ "cluster-stack", "html", pe__cluster_stack_html },
{ "cluster-stack", "log", pe__cluster_stack_text },
{ "cluster-stack", "text", pe__cluster_stack_text },
{ "cluster-stack", "xml", pe__cluster_stack_xml },
{ "cluster-times", "html", pe__cluster_times_html },
{ "cluster-times", "log", pe__cluster_times_text },
{ "cluster-times", "text", pe__cluster_times_text },
{ "cluster-times", "xml", pe__cluster_times_xml },
{ "failed-action", "html", pe__failed_action_text },
{ "failed-action", "log", pe__failed_action_text },
{ "failed-action", "text", pe__failed_action_text },
{ "failed-action", "xml", pe__failed_action_xml },
{ "group", "xml", pe__group_xml },
{ "group", "html", pe__group_html },
{ "group", "text", pe__group_text },
{ "group", "log", pe__group_text },
- /* maint-mode only exists for text and log. Other formatters output it as
- * part of the cluster-options handler.
- */
- { "maint-mode", "log", pe__cluster_maint_mode_text },
{ "maint-mode", "text", pe__cluster_maint_mode_text },
{ "node", "html", pe__node_html },
{ "node", "log", pe__node_text },
{ "node", "text", pe__node_text },
{ "node", "xml", pe__node_xml },
{ "node-list", "html", pe__node_list_html },
{ "node-list", "log", pe__node_list_text },
{ "node-list", "text", pe__node_list_text },
{ "node-list", "xml", pe__node_list_xml },
{ "node-attribute", "html", pe__node_attribute_html },
{ "node-attribute", "log", pe__node_attribute_text },
{ "node-attribute", "text", pe__node_attribute_text },
{ "node-attribute", "xml", pe__node_attribute_xml },
{ "op-history", "html", pe__op_history_text },
{ "op-history", "log", pe__op_history_text },
{ "op-history", "text", pe__op_history_text },
{ "op-history", "xml", pe__op_history_xml },
{ "primitive", "xml", pe__resource_xml },
{ "primitive", "html", pe__resource_html },
{ "primitive", "text", pe__resource_text },
{ "primitive", "log", pe__resource_text },
{ "resource-history", "default", pe__resource_history_text },
{ "resource-history", "xml", pe__resource_history_xml },
{ "resource-list", "default", pe__resource_list },
{ "ticket", "html", pe__ticket_html },
{ "ticket", "log", pe__ticket_text },
{ "ticket", "text", pe__ticket_text },
{ "ticket", "xml", pe__ticket_xml },
{ NULL, NULL, NULL }
};
void
pe__register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
void
pe__output_node(pe_node_t *node, gboolean details, pcmk__output_t *out)
{
if (node == NULL) {
crm_trace("");
return;
}
CRM_ASSERT(node->details);
crm_trace("%sNode %s: (weight=%d, fixed=%s)",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
GListPtr all = NULL;
all = g_list_prepend(all, strdup("*"));
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
out->message(out, crm_map_element_name(rsc->xml),
pe_print_pending, rsc, all, all);
}
g_list_free_full(all, free);
}
}
diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c
index ef0b0c7bb1..2c092dfe0d 100644
--- a/tools/crm_mon_curses.c
+++ b/tools/crm_mon_curses.c
@@ -1,432 +1,446 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "crm_mon.h"
#if CURSES_ENABLED
GOptionEntry crm_mon_curses_output_entries[] = {
{ NULL }
};
typedef struct curses_list_data_s {
unsigned int len;
char *singular_noun;
char *plural_noun;
} curses_list_data_t;
typedef struct private_data_s {
GQueue *parent_q;
} private_data_t;
static void
curses_free_priv(pcmk__output_t *out) {
private_data_t *priv = out->priv;
if (priv == NULL) {
return;
}
g_queue_free(priv->parent_q);
free(priv);
out->priv = NULL;
}
static bool
curses_init(pcmk__output_t *out) {
private_data_t *priv = NULL;
/* If curses_init was previously called on this output struct, just return. */
if (out->priv != NULL) {
return true;
} else {
out->priv = calloc(1, sizeof(private_data_t));
if (out->priv == NULL) {
return false;
}
priv = out->priv;
}
priv->parent_q = g_queue_new();
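/* Enter curses mode with line buffering and input echo disabled */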
initscr();
cbreak();
noecho();
return true;
}
static void
curses_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
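/* Restore normal terminal settings and leave curses mode */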
echo();
nocbreak();
endwin();
}
static void
curses_reset(pcmk__output_t *out) {
CRM_ASSERT(out != NULL);
curses_free_priv(out);
curses_init(out);
}
static void
curses_subprocess_output(pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr) {
if (proc_stdout != NULL) {
printw("%s\n", proc_stdout);
}
if (proc_stderr != NULL) {
printw("%s\n", proc_stderr);
}
clrtoeol();
refresh();
}
/* curses_version is defined in curses.h, so we can't use that name here.
* Note that this function prints out via text, not with curses.
*/
static void
curses_ver(pcmk__output_t *out, bool extended) {
if (extended) {
printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
} else {
printf("Pacemaker %s\n", PACEMAKER_VERSION);
printf("Written by Andrew Beekhof\n");
}
}
G_GNUC_PRINTF(2, 3)
static void
curses_error(pcmk__output_t *out, const char *format, ...) {
va_list ap;
/* Informational output does not get indented, to separate it from other
* potentially indented list output.
*/
va_start(ap, format);
vw_printw(stdscr, format, ap);
va_end(ap);
/* Add a newline. */
addch('\n');
clrtoeol();
refresh();
sleep(2);
}
G_GNUC_PRINTF(2, 3)
static void
curses_info(pcmk__output_t *out, const char *format, ...) {
va_list ap;
/* Informational output does not get indented, to separate it from other
* potentially indented list output.
*/
va_start(ap, format);
vw_printw(stdscr, format, ap);
va_end(ap);
/* Add a newline. */
addch('\n');
clrtoeol();
refresh();
}
static void
curses_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
private_data_t *priv = out->priv;
CRM_ASSERT(priv != NULL);
curses_indented_printf(out, "%s", buf);
}
G_GNUC_PRINTF(4, 5)
static void
curses_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
const char *format, ...) {
private_data_t *priv = out->priv;
curses_list_data_t *new_list = NULL;
va_list ap;
CRM_ASSERT(priv != NULL);
va_start(ap, format);
curses_indented_vprintf(out, format, ap);
printw(":\n");
va_end(ap);
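/* Track the new list so nested items can be indented and counted */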
new_list = calloc(1, sizeof(curses_list_data_t));
new_list->len = 0;
new_list->singular_noun = singular_noun == NULL ? NULL : strdup(singular_noun);
new_list->plural_noun = plural_noun == NULL ? NULL : strdup(plural_noun);
g_queue_push_tail(priv->parent_q, new_list);
}
G_GNUC_PRINTF(3, 4)
static void
curses_list_item(pcmk__output_t *out, const char *id, const char *format, ...) {
private_data_t *priv = out->priv;
va_list ap;
CRM_ASSERT(priv != NULL);
va_start(ap, format);
if (id != NULL) {
curses_indented_printf(out, "%s: ", id);
vw_printw(stdscr, format, ap);
} else {
curses_indented_vprintf(out, format, ap);
}
addch('\n');
va_end(ap);
out->increment_list(out);
}
static void
curses_increment_list(pcmk__output_t *out) {
private_data_t *priv = out->priv;
gpointer tail;
CRM_ASSERT(priv != NULL);
tail = g_queue_peek_tail(priv->parent_q);
CRM_ASSERT(tail != NULL);
((curses_list_data_t *) tail)->len++;
}
static void
curses_end_list(pcmk__output_t *out) {
private_data_t *priv = out->priv;
curses_list_data_t *node = NULL;
CRM_ASSERT(priv != NULL);
node = g_queue_pop_tail(priv->parent_q);
if (node->singular_noun != NULL && node->plural_noun != NULL) {
if (node->len == 1) {
curses_indented_printf(out, "%d %s found\n", node->len, node->singular_noun);
} else {
curses_indented_printf(out, "%d %s found\n", node->len, node->plural_noun);
}
}
free(node);
}
static bool
curses_is_quiet(pcmk__output_t *out) {
return out->quiet;
}
pcmk__output_t *
crm_mon_mk_curses_output(char **argv) {
pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
if (retval == NULL) {
return NULL;
}
retval->fmt_name = "console";
retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
retval->init = curses_init;
retval->free_priv = curses_free_priv;
retval->finish = curses_finish;
retval->reset = curses_reset;
retval->register_message = pcmk__register_message;
retval->message = pcmk__call_message;
retval->subprocess_output = curses_subprocess_output;
retval->version = curses_ver;
retval->err = curses_error;
retval->info = curses_info;
retval->output_xml = curses_output_xml;
retval->begin_list = curses_begin_list;
retval->list_item = curses_list_item;
retval->increment_list = curses_increment_list;
retval->end_list = curses_end_list;
retval->is_quiet = curses_is_quiet;
return retval;
}
G_GNUC_PRINTF(2, 0)
void
curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
int level = 0;
private_data_t *priv = out->priv;
CRM_ASSERT(priv != NULL);
level = g_queue_get_length(priv->parent_q);
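/* Indent two spaces per nesting level and add a bullet for items inside a list */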
for (int i = 0; i < level; i++) {
printw(" ");
}
if (level > 0) {
printw("* ");
}
vw_printw(stdscr, format, args);
clrtoeol();
refresh();
}
G_GNUC_PRINTF(2, 3)
void
curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
va_list ap;
va_start(ap, format);
curses_indented_vprintf(out, format, ap);
va_end(ap);
}
PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
static int
stonith_event_console(pcmk__output_t *out, va_list args) {
stonith_history_t *event = va_arg(args, stonith_history_t *);
gboolean full_history = va_arg(args, gboolean);
gboolean later_succeeded = va_arg(args, gboolean);
crm_time_t *crm_when = crm_time_new(NULL);
char *buf = NULL;
crm_time_set_timet(crm_when, &(event->completed));
buf = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
switch (event->state) {
case st_failed:
curses_indented_printf(out, "%s of %s failed: delegate=%s, client=%s, origin=%s, %s='%s'%s\n",
stonith_action_str(event->action), event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
full_history ? "completed" : "last-failed", buf,
later_succeeded ? " (a later attempt succeeded)" : "");
break;
case st_done:
curses_indented_printf(out, "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'\n",
stonith_action_str(event->action), event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
full_history ? "completed" : "last-successful", buf);
break;
default:
curses_indented_printf(out, "%s of %s pending: client=%s, origin=%s\n",
stonith_action_str(event->action), event->target,
event->client, event->origin);
break;
}
free(buf);
crm_time_free(crm_when);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("maint-mode")
+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long")
static int
cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
- printw("\n *** Resource management is DISABLED ***");
- printw("\n The cluster will not attempt to start, stop or recover services");
- printw("\n");
+ unsigned long long flags = va_arg(args, unsigned long long);
+ int rc;
+
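+ /* A banner is printed only when resource management is disabled, whether by
+  * maintenance-mode or by stop-all-resources
+  */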
+ if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ printw("\n *** Resource management is DISABLED ***");
+ printw("\n The cluster will not attempt to start, stop or recover services");
+ printw("\n");
+ rc = pcmk_rc_ok;
+ } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ printw("\n *** Resource management is DISABLED ***");
+ printw("\n The cluster will keep all resources stopped");
+ printw("\n");
+ rc = pcmk_rc_ok;
+ } else {
+ rc = pcmk_rc_no_output;
+ }
+
clrtoeol();
refresh();
- return pcmk_rc_ok;
+ return rc;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "console", pe__ban_text },
{ "bundle", "console", pe__bundle_text },
{ "clone", "console", pe__clone_text },
{ "cluster-counts", "console", pe__cluster_counts_text },
{ "cluster-dc", "console", pe__cluster_dc_text },
{ "cluster-options", "console", pe__cluster_options_text },
{ "cluster-stack", "console", pe__cluster_stack_text },
{ "cluster-summary", "console", pe__cluster_summary },
{ "cluster-times", "console", pe__cluster_times_text },
{ "failed-action", "console", pe__failed_action_text },
{ "failed-fencing-history", "console", stonith__failed_history },
{ "fencing-history", "console", stonith__history },
{ "full-fencing-history", "console", stonith__full_history },
{ "group", "console", pe__group_text },
{ "maint-mode", "console", cluster_maint_mode_console },
{ "node", "console", pe__node_text },
{ "node-attribute", "console", pe__node_attribute_text },
{ "node-list", "console", pe__node_list_text },
{ "op-history", "console", pe__op_history_text },
{ "pending-fencing-actions", "console", stonith__pending_actions },
{ "primitive", "console", pe__resource_text },
{ "resource-history", "console", pe__resource_history_text },
{ "stonith-event", "console", stonith_event_console },
{ "ticket", "console", pe__ticket_text },
{ NULL, NULL, NULL }
};
void
crm_mon_register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
#else
pcmk__output_t *
crm_mon_mk_curses_output(char **argv) {
/* curses was disabled in the build, so fall back to text. */
return pcmk__mk_text_output(argv);
}
G_GNUC_PRINTF(2, 0)
void
curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
return;
}
G_GNUC_PRINTF(2, 3)
void
curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
return;
}
void
crm_mon_register_messages(pcmk__output_t *out) {
return;
}
#endif
diff --git a/xml/api/crm_mon-2.3.rng b/xml/api/crm_mon-2.3.rng
new file mode 100644
index 0000000000..17aef7e477
--- /dev/null
+++ b/xml/api/crm_mon-2.3.rng
@@ -0,0 +1,410 @@
+ <!-- crm_mon-2.3.rng schema body (410 lines) omitted; only its literal value
+      lists remain legible:
+      node type: unknown | member | remote | ping
+      ticket status: granted | revoked
+      bundle container type: docker | rkt | podman -->
diff --git a/xml/crm_mon.rng b/xml/crm_mon.rng
index 355e5d934a..be87fbac84 100644
--- a/xml/crm_mon.rng
+++ b/xml/crm_mon.rng
@@ -1,16 +1,16 @@
-
+