diff --git a/cts/cli/crm_mon-T180.xml b/cts/cli/crm_mon-T180.xml
new file mode 100644
index 0000000000..ab4f24dd22
--- /dev/null
+++ b/cts/cli/crm_mon-T180.xml
@@ -0,0 +1,160 @@
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 7ab27c4d47..cdcef5f51d 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,4247 +1,4303 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
- * Node: httpd-bundle-0@cluster01:
+ * Node: httpd-bundle-0:
* httpd: migration-threshold=1000000:
* (1) start
- * Node: httpd-bundle-1@cluster02:
+ * Node: httpd-bundle-1:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster02
* ping (ocf:pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster01
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
* Replica[1]
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster02
* httpd (ocf:heartbeat:apache): Started httpd-bundle-1
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
* httpd-bundle-1 (ocf:pacemaker:remote): Started cluster02
* Replica[2]
* httpd-bundle-ip-192.168.122.133 (ocf:heartbeat:IPaddr2): Stopped
* httpd (ocf:heartbeat:apache): Stopped
* httpd-bundle-docker-2 (ocf:heartbeat:docker): Stopped
* httpd-bundle-2 (ocf:pacemaker:remote): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* 1 (ocf:pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf:heartbeat:IPaddr): Active cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
- * Node: httpd-bundle-0@cluster01:
+ * Node: httpd-bundle-0:
* httpd: migration-threshold=1000000:
* (1) start
- * Node: httpd-bundle-1@cluster02:
+ * Node: httpd-bundle-1:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf:pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
* mysql-proxy (lsb:mysql-proxy): Started
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started
* Node cluster02: online:
* Resources:
* ping (ocf:pacemaker:ping): Started
* dummy (ocf:pacemaker:Dummy): Started
* Public-IP (ocf:heartbeat:IPaddr): Started
* Email (lsb:exim): Started
* mysql-proxy (lsb:mysql-proxy): Started
* promotable-rsc (ocf:pacemaker:Stateful): Promoted
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started
- * GuestNode httpd-bundle-0@cluster01: online:
+ * GuestNode httpd-bundle-0: online:
* Resources:
* httpd (ocf:heartbeat:apache): Started
- * GuestNode httpd-bundle-1@cluster02: online:
+ * GuestNode httpd-bundle-1: online:
* Resources:
* httpd (ocf:heartbeat:apache): Started
- * GuestNode httpd-bundle-2@: OFFLINE:
+ * GuestNode httpd-bundle-2: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
- * Node: httpd-bundle-0@cluster01:
+ * Node: httpd-bundle-0:
* httpd: migration-threshold=1000000:
* (1) start
- * Node: httpd-bundle-1@cluster02:
+ * Node: httpd-bundle-1:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (lsb:mysql-proxy): Active
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:Stateful): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
* 1 (lsb:mysql-proxy): Active
* 1 (ocf:heartbeat:IPaddr): Active
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:Dummy): Active
* 1 (ocf:pacemaker:Stateful): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
- * GuestNode httpd-bundle-0@cluster01: online:
+ * GuestNode httpd-bundle-0: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
- * GuestNode httpd-bundle-1@cluster02: online:
+ * GuestNode httpd-bundle-1: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
- * Node: httpd-bundle-0@cluster01:
+ * Node: httpd-bundle-0:
* httpd: migration-threshold=1000000:
* (1) start
- * Node: httpd-bundle-1@cluster02:
+ * Node: httpd-bundle-1:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster02
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* Replica[1]
* httpd (ocf:heartbeat:apache): Started httpd-bundle-1
* Replica[2]
* httpd (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by clone name in cloned group
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by group name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by primitive name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster01: online
* GuestNode httpd-bundle-1@cluster02: online
* GuestNode httpd-bundle-2@: OFFLINE
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster02: online
* GuestNode httpd-bundle-1@cluster01: online
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster01
* ping (ocf:pacemaker:ping): Stopped (not installed)
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
* Replica[1]
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
* httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
* httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
* Resource Group: partially-active-group (2 members inactive):
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
* dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
Failed Resource Actions:
* dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster02: online
* GuestNode httpd-bundle-1@cluster01: online
Full List of Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster01
* ping (ocf:pacemaker:ping): Stopped (not installed)
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
* Replica[1]
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
* httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
* httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
* dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
* dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled)
* dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed)
* smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed)
Failed Resource Actions:
* dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster02: online
* GuestNode httpd-bundle-1@cluster01: online
Full List of Resources:
* 0/1 (ocf:pacemaker:HealthSMART): Active
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster01
* ping (ocf:pacemaker:ping): Stopped (not installed)
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
* Replica[1]
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
* httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
* httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
* Resource Group: partially-active-group:
* 2/4 (ocf:pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01 (1):
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* dummy-2: migration-threshold=1000000:
* (2) probe
* dummy-4: migration-threshold=1000000:
* (2) probe
* smart-mon: migration-threshold=1000000:
* (9) probe
* ping: migration-threshold=1000000:
* (6) probe
* Node: cluster01 (1):
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster01:
* httpd: migration-threshold=1000000:
* (1) probe
Failed Resource Actions:
* dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Text output of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Resource Group: partially-active-group (2 members inactive):
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
* dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
=#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active group
=#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Full List of Resources:
* Resource Group: partially-active-group:
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
* dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
* dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled)
* dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed)
=#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active group, with inactive resources
=#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Resource Group: partially-active-group (2 members inactive):
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
=#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of active member of partially active group
=#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1
* Node cluster02 (2): online, feature set <3.15.1
* GuestNode httpd-bundle-0@cluster02: online
* GuestNode httpd-bundle-1@cluster01: online
Active Resources:
* Resource Group: partially-active-group (2 members inactive):
* dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
Failed Resource Actions:
* dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
=#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of inactive member of partially active group
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Node cluster01 (1): online, feature set <3.15.1:
* Resources:
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02 (2): online, feature set <3.15.1:
* Resources:
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 2 (ocf:pacemaker:Dummy): Active
* 1 (ocf:pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
* GuestNode httpd-bundle-1@cluster01: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster01
* ping (ocf:pacemaker:ping): Stopped (not installed)
* Resource Group: partially-active-group:
* 2/4 (ocf:pacemaker:Dummy): Active cluster02
* smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed)
Node Attributes:
* Node: cluster01 (1):
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* dummy-2: migration-threshold=1000000:
* (2) probe
* dummy-4: migration-threshold=1000000:
* (2) probe
* smart-mon: migration-threshold=1000000:
* (9) probe
* ping: migration-threshold=1000000:
* (6) probe
* Node: cluster01 (1):
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster01:
* httpd: migration-threshold=1000000:
* (1) probe
Failed Resource Actions:
* dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 16 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED cluster01
* smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed)
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0
=#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, filtered by node
=#=#=#= Begin test: Text output of active unmanaged resource on offline node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 2 nodes configured
* 3 resource instances configured
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* Online: [ cluster01 ]
* OFFLINE: [ cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
* rsc1 (ocf:pacemaker:Dummy): Started cluster01 (unmanaged)
* rsc2 (ocf:pacemaker:Dummy): Started cluster02 (unmanaged)
=#=#=#= End test: Text output of active unmanaged resource on offline node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of active unmanaged resource on offline node
=#=#=#= Begin test: XML output of active unmanaged resource on offline node =#=#=#=
=#=#=#= End test: XML output of active unmanaged resource on offline node - OK (0) =#=#=#=
* Passed: crm_mon - XML output of active unmanaged resource on offline node
=#=#=#= Begin test: Brief text output of active unmanaged resource on offline node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 2 nodes configured
* 3 resource instances configured
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* Online: [ cluster01 ]
* OFFLINE: [ cluster02 ]
Active Resources:
* 1 (ocf:pacemaker:Dummy): Active cluster01
* 1 (ocf:pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
=#=#=#= End test: Brief text output of active unmanaged resource on offline node - OK (0) =#=#=#=
* Passed: crm_mon - Brief text output of active unmanaged resource on offline node
=#=#=#= Begin test: Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 2 nodes configured
* 3 resource instances configured
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf:pacemaker:Dummy): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: OFFLINE:
* Resources:
* 1 (ocf:pacemaker:Dummy): Active
=#=#=#= End test: Brief text output of active unmanaged resource on offline node, grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Brief text output of active unmanaged resource on offline node, grouped by node
=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
- * GuestNode httpd-bundle-0@cluster01: maintenance
- * GuestNode httpd-bundle-1@cluster02: maintenance
+ * GuestNode httpd-bundle-0: maintenance
+ * GuestNode httpd-bundle-1: maintenance
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping] (unmanaged):
* ping (ocf:pacemaker:ping): Started cluster02 (unmanaged)
* ping (ocf:pacemaker:ping): Started cluster01 (unmanaged)
* Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
* dummy (ocf:pacemaker:Dummy): Started cluster02 (unmanaged)
* Clone Set: inactive-clone [inactive-dhcpd] (unmanaged, disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (unmanaged, disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
* Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (unmanaged)
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (unmanaged)
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (unmanaged)
* Resource Group: exim-group (unmanaged):
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (unmanaged)
* Email (lsb:exim): Started cluster02 (unmanaged)
* Clone Set: mysql-clone-group [mysql-group] (unmanaged):
* Resource Group: mysql-group:0 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
* Resource Group: mysql-group:1 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
* Clone Set: promotable-clone [promotable-rsc] (promotable, unmanaged):
* promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (unmanaged)
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (unmanaged)
=#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
* Passed: crm_mon - Text output of all resources with maintenance-mode enabled
+=#=#=#= Begin test: Text output of guest node's container on different node from its remote resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cent7-host2 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 10 resource instances configured
+
+Node List:
+ * Online: [ cent7-host1 cent7-host2 ]
+ * GuestOnline: [ httpd-bundle1-0 httpd-bundle2-0 ]
+
+Active Resources:
+ * Resource Group: group1:
+ * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1
+ * Resource Group: group2:
+ * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2
+ * Container bundle: httpd-bundle1 [pcmktest:http]:
+ * httpd-bundle1-0 (192.168.20.188) (ocf:heartbeat:apache): Started cent7-host1
+ * Container bundle: httpd-bundle2 [pcmktest:http]:
+ * httpd-bundle2-0 (192.168.20.190) (ocf:heartbeat:apache): Started cent7-host2
+=#=#=#= End test: Text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of guest node's container on different node from its remote resource
+=#=#=#= Begin test: Complete text output of guest node's container on different node from its remote resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cent7-host2 (3232262829) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 10 resource instances configured
+
+Node List:
+ * Node cent7-host1 (3232262828): online, feature set <3.15.1
+ * Node cent7-host2 (3232262829): online, feature set <3.15.1
+ * GuestNode httpd-bundle1-0@cent7-host1: online
+ * GuestNode httpd-bundle2-0@cent7-host2: online
+
+Active Resources:
+ * Resource Group: group1:
+ * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1
+ * Resource Group: group2:
+ * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2
+ * Container bundle: httpd-bundle1 [pcmktest:http]:
+ * httpd-bundle1-ip-192.168.20.188 (ocf:heartbeat:IPaddr2): Started cent7-host1
+ * httpd1 (ocf:heartbeat:apache): Started httpd-bundle1-0
+ * httpd-bundle1-docker-0 (ocf:heartbeat:docker): Started cent7-host1
+ * httpd-bundle1-0 (ocf:pacemaker:remote): Started cent7-host2
+ * Container bundle: httpd-bundle2 [pcmktest:http]:
+ * httpd-bundle2-ip-192.168.20.190 (ocf:heartbeat:IPaddr2): Started cent7-host2
+ * httpd2 (ocf:heartbeat:apache): Started httpd-bundle2-0
+ * httpd-bundle2-docker-0 (ocf:heartbeat:docker): Started cent7-host2
+ * httpd-bundle2-0 (ocf:pacemaker:remote): Started cent7-host2
+=#=#=#= End test: Complete text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output of guest node's container on different node from its remote resource
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 2ed991fffd..8451ad4857 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,5506 +1,5506 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Validate CIB =#=#=#=
=#=#=#= Current cib after: Validate CIB =#=#=#=
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
=#=#=#= Begin test: Query CIB =#=#=#=
=#=#=#= Current cib after: Query CIB =#=#=#=
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
=#=#=#= Current cib after: Query new cluster option =#=#=#=
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Query cluster options =#=#=#=
=#=#=#= Current cib after: Query cluster options =#=#=#=
=#=#=#= End test: Query cluster options - OK (0) =#=#=#=
* Passed: cibadmin - Query cluster options
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
=#=#=#= Current cib after: Create operation should fail =#=#=#=
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
=#=#=#= Current cib after: Query new node attribute =#=#=#=
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
* Node Attributes:
* Node: node1:
* ram : 1024M
=#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show node attributes with crm_simulate
=#=#=#= Begin test: Set a second transient node attribute =#=#=#=
=#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
=#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a second transient node attribute
=#=#=#= Begin test: Query node attributes by pattern =#=#=#=
scope=status name=fail-count-foo value=3
scope=status name=fail-count-bar value=5
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query node attributes by pattern
=#=#=#= Begin test: Update node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
=#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update node attributes by pattern
=#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
=#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
=#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete node attributes by pattern
=#=#=#= Begin test: crm_attribute given invalid pattern usage =#=#=#=
crm_attribute: Error: pattern can only be used with query, or with till-reboot update or delete
=#=#=#= End test: crm_attribute given invalid pattern usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid pattern usage
=#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
crm_attribute: Error: must specify attribute name or pattern to delete
=#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid delete usage
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= Current cib after: Replace operation should fail =#=#=#=
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Default standby value =#=#=#=
scope=status name=standby value=off
=#=#=#= Current cib after: Default standby value =#=#=#=
=#=#=#= End test: Default standby value - OK (0) =#=#=#=
* Passed: crm_standby - Default standby value
=#=#=#= Begin test: Set standby status =#=#=#=
=#=#=#= Current cib after: Set standby status =#=#=#=
=#=#=#= End test: Set standby status - OK (0) =#=#=#=
* Passed: crm_standby - Set standby status
=#=#=#= Begin test: Query standby value =#=#=#=
scope=nodes name=standby value=true
=#=#=#= Current cib after: Query standby value =#=#=#=
=#=#=#= End test: Query standby value - OK (0) =#=#=#=
* Passed: crm_standby - Query standby value
=#=#=#= Begin test: Delete standby value =#=#=#=
Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= Current cib after: Delete standby value =#=#=#=
=#=#=#= End test: Delete standby value - OK (0) =#=#=#=
* Passed: crm_standby - Delete standby value
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
crm_resource: non-option ARGV-elements:
[1 of 2] foo
[2 of 2] bar
=#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource run with extra arguments
=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
crm_resource: --resource cannot be used with --class, --agent, and --provider
=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given both -r and resource config
=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create another resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute
=#=#=#= Begin test: Show why a resource is not running =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running
=#=#=#= Begin test: Remove another resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute
=#=#=#= Begin test: Create a resource attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List the configured resources in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: List the configured resources in XML - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources in XML
=#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - Implicitly list the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Show XML configuration of resource, output as XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
]]>
=#=#=#= End test: Show XML configuration of resource, output as XML - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource, output as XML
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Node 'i.do.not.exist' not found
Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
* Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing Cluster Transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
crm_resource: Error performing operation: Requested item already exists
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
crm_resource: Resource 'xyz' not found
Error performing operation: No such object
=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
* Passed: crm_resource - Try to move a resource that doesn't exist
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
=#=#=#= Begin test: Default ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Default ticket granted state =#=#=#=
=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Default ticket granted state
=#=#=#= Begin test: Set ticket granted state =#=#=#=
=#=#=#= Current cib after: Set ticket granted state =#=#=#=
=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set ticket granted state
=#=#=#= Begin test: Query ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Query ticket granted state =#=#=#=
=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state
=#=#=#= Begin test: Delete ticket granted state =#=#=#=
=#=#=#= Current cib after: Delete ticket granted state =#=#=#=
=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket granted state
=#=#=#= Begin test: Make a ticket standby =#=#=#=
=#=#=#= Current cib after: Make a ticket standby =#=#=#=
=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
* Passed: crm_ticket - Make a ticket standby
=#=#=#= Begin test: Query ticket standby state =#=#=#=
true
=#=#=#= Current cib after: Query ticket standby state =#=#=#=
=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket standby state
=#=#=#= Begin test: Activate a ticket =#=#=#=
=#=#=#= Current cib after: Activate a ticket =#=#=#=
=#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Activate a ticket
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket standby state
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
crm_resource: Node 'host1' not found
Error performing operation: No such object
=#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
Performing Requested Modifications:
* Bringing node node2 online
* Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing Cluster Transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
Locations:
* Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
=#=#=#= Begin test: Ban dummy from node2 =#=#=#=
=#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
=#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing Cluster Transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node3
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 =#=#=#=
=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Copy resources =#=#=#=
=#=#=#= End test: Copy resources - OK (0) =#=#=#=
* Passed: cibadmin - Copy resources
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Restore duplicates =#=#=#=
=#=#=#= Current cib after: Restore duplicates =#=#=#=
=#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
* Passed: cibadmin - Restore duplicates
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
Migration will take effect until:
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
=#=#=#= Begin test: Remove expired constraints =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: crm_resource - Remove expired constraints
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
=#=#=#= Begin test: Set a node health strategy =#=#=#=
=#=#=#= Current cib after: Set a node health strategy =#=#=#=
=#=#=#= End test: Set a node health strategy - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health strategy
=#=#=#= Begin test: Set a node health attribute =#=#=#=
=#=#=#= Current cib after: Set a node health attribute =#=#=#=
=#=#=#= End test: Set a node health attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health attribute
=#=#=#= Begin test: Show why a resource is not running on an unhealthy node =#=#=#=
=#=#=#= End test: Show why a resource is not running on an unhealthy node - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running on an unhealthy node
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Create an XML patchset =#=#=#=
=#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#=
* Passed: crm_diff - Create an XML patchset
=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1
=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1
=#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1 in XML
=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2
=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2
=#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2 in XML
=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3
=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3
=#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3 in XML
=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4
=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4
=#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4 in XML
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5
=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5
=#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5 in XML
=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6
=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6
=#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6 in XML
=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7
=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7
=#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7 in XML
=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8
=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8
=#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8 in XML
=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9
=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9
=#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9 in XML
=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10
=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10
=#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10 in XML
=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11
=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (id=colocation-prim11-prim12-INFINITY - loop)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (id=colocation-prim13-prim11-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11
=#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11 in XML
=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12
=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (id=colocation-prim12-prim13-INFINITY - loop)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (id=colocation-prim11-prim12-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12
=#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12 in XML
=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13
=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (id=colocation-prim13-prim11-INFINITY - loop)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (id=colocation-prim12-prim13-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13
=#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13 in XML
=#=#=#= Begin test: Check locations and constraints for group =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group
=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group
=#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group in XML
=#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group in XML
=#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone
=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone
=#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone in XML
=#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone in XML
=#=#=#= Begin test: Show resource digests =#=#=#=
=#=#=#= End test: Show resource digests - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests
=#=#=#= Begin test: Show resource digests with overrides =#=#=#=
=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests with overrides
=#=#=#= Begin test: List all nodes =#=#=#=
cluster node: overcloud-controller-0 (1)
cluster node: overcloud-controller-1 (2)
cluster node: overcloud-controller-2 (3)
cluster node: overcloud-galera-0 (4)
cluster node: overcloud-galera-1 (5)
cluster node: overcloud-galera-2 (6)
guest node: lxc1 (lxc1)
guest node: lxc2 (lxc2)
remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
=#=#=#= End test: List all nodes - OK (0) =#=#=#=
* Passed: crmadmin - List all nodes
=#=#=#= Begin test: Minimally list all nodes =#=#=#=
overcloud-controller-0
overcloud-controller-1
overcloud-controller-2
overcloud-galera-0
overcloud-galera-1
overcloud-galera-2
lxc1
lxc2
overcloud-rabbit-0
overcloud-rabbit-1
overcloud-rabbit-2
=#=#=#= End test: Minimally list all nodes - OK (0) =#=#=#=
* Passed: crmadmin - Minimally list all nodes
=#=#=#= Begin test: List all nodes as bash exports =#=#=#=
export overcloud-controller-0=1
export overcloud-controller-1=2
export overcloud-controller-2=3
export overcloud-galera-0=4
export overcloud-galera-1=5
export overcloud-galera-2=6
export lxc1=lxc1
export lxc2=lxc2
export overcloud-rabbit-0=overcloud-rabbit-0
export overcloud-rabbit-1=overcloud-rabbit-1
export overcloud-rabbit-2=overcloud-rabbit-2
=#=#=#= End test: List all nodes as bash exports - OK (0) =#=#=#=
* Passed: crmadmin - List all nodes as bash exports
=#=#=#= Begin test: List cluster nodes =#=#=#=
6
=#=#=#= End test: List cluster nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster nodes
=#=#=#= Begin test: List guest nodes =#=#=#=
2
=#=#=#= End test: List guest nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest nodes
=#=#=#= Begin test: List remote nodes =#=#=#=
3
=#=#=#= End test: List remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List remote nodes
=#=#=#= Begin test: List cluster,remote nodes =#=#=#=
9
=#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster,remote nodes
=#=#=#= Begin test: List guest,remote nodes =#=#=#=
5
=#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest,remote nodes
=#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#=
=#=#=#= End test: Show allocation scores with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show allocation scores with crm_simulate
=#=#=#= Begin test: Show utilization with crm_simulate =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
[ cluster01 cluster02 ]
-[ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+[ httpd-bundle-0 httpd-bundle-1 ]
Started: [ cluster01 cluster02 ]
Fencing (stonith:fence_xvm): Started cluster01
dummy (ocf:pacemaker:Dummy): Started cluster02
Stopped (disabled): [ cluster01 cluster02 ]
inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
Public-IP (ocf:heartbeat:IPaddr): Started cluster02
Email (lsb:exim): Started cluster02
Started: [ cluster01 cluster02 ]
Promoted: [ cluster02 ]
Unpromoted: [ cluster01 ]
Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4
Original: cluster01 capacity:
Original: cluster02 capacity:
Original: httpd-bundle-0 capacity:
Original: httpd-bundle-1 capacity:
Original: httpd-bundle-2 capacity:
pcmk__assign_primitive: ping:0 utilization on cluster02:
pcmk__assign_primitive: ping:1 utilization on cluster01:
pcmk__assign_primitive: Fencing utilization on cluster01:
pcmk__assign_primitive: dummy utilization on cluster02:
pcmk__assign_primitive: httpd-bundle-docker-0 utilization on cluster01:
pcmk__assign_primitive: httpd-bundle-docker-1 utilization on cluster02:
pcmk__assign_primitive: httpd-bundle-ip-192.168.122.131 utilization on cluster01:
pcmk__assign_primitive: httpd-bundle-0 utilization on cluster01:
pcmk__assign_primitive: httpd:0 utilization on httpd-bundle-0:
pcmk__assign_primitive: httpd-bundle-ip-192.168.122.132 utilization on cluster02:
pcmk__assign_primitive: httpd-bundle-1 utilization on cluster02:
pcmk__assign_primitive: httpd:1 utilization on httpd-bundle-1:
pcmk__assign_primitive: httpd-bundle-2 utilization on cluster01:
pcmk__assign_primitive: httpd:2 utilization on httpd-bundle-2:
pcmk__assign_primitive: Public-IP utilization on cluster02:
pcmk__assign_primitive: Email utilization on cluster02:
pcmk__assign_primitive: mysql-proxy:0 utilization on cluster02:
pcmk__assign_primitive: mysql-proxy:1 utilization on cluster01:
pcmk__assign_primitive: promotable-rsc:0 utilization on cluster02:
pcmk__assign_primitive: promotable-rsc:1 utilization on cluster01:
Remaining: cluster01 capacity:
Remaining: cluster02 capacity:
Remaining: httpd-bundle-0 capacity:
Remaining: httpd-bundle-1 capacity:
Remaining: httpd-bundle-2 capacity:
Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
=#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show utilization with crm_simulate
=#=#=#= Begin test: Simulate injecting a failure =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Injecting ping_monitor_10000@cluster02=1 into the configuration
* Injecting attribute fail-count-ping#monitor_10000=value++ into /node_state '2'
* Injecting attribute last-failure-ping#monitor_10000= into /node_state '2'
Transition Summary:
* Recover ping:0 ( cluster02 )
* Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
Executing Cluster Transition:
* Cluster action: clear_failcount for ping on cluster02
* Pseudo action: ping-clone_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: ping stop on cluster02
* Pseudo action: ping-clone_stopped_0
* Pseudo action: ping-clone_start_0
* Pseudo action: httpd-bundle-clone_start_0
* Resource action: ping start on cluster02
* Resource action: ping monitor=10000 on cluster02
* Pseudo action: ping-clone_running_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Simulate injecting a failure - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate injecting a failure
=#=#=#= Begin test: Simulate bringing a node down =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Taking node cluster01 offline
Transition Summary:
* Fence (off) httpd-bundle-0 (resource: httpd-bundle-docker-0) 'guest is unclean'
* Start Fencing ( cluster02 )
* Start httpd-bundle-0 ( cluster02 ) due to unrunnable httpd-bundle-docker-0 start (blocked)
* Stop httpd:0 ( httpd-bundle-0 ) due to unrunnable httpd-bundle-docker-0 start
* Start httpd-bundle-2 ( cluster02 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
Executing Cluster Transition:
* Resource action: Fencing start on cluster02
* Pseudo action: stonith-httpd-bundle-0-off on httpd-bundle-0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: Fencing monitor=60000 on cluster02
* Pseudo action: httpd-bundle-clone_stop_0
* Pseudo action: httpd_stop_0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Pseudo action: httpd-bundle_stopped_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster02 ]
* OFFLINE: [ cluster01 ]
- * GuestOnline: [ httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* Stopped: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster02
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Stopped: [ cluster01 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Stopped: [ cluster01 ]
=#=#=#= End test: Simulate bringing a node down - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate bringing a node down
=#=#=#= Begin test: Simulate a node failing =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Failing node cluster02
Transition Summary:
* Fence (off) httpd-bundle-1 (resource: httpd-bundle-docker-1) 'guest is unclean'
* Fence (reboot) cluster02 'peer is no longer part of the cluster'
* Stop ping:0 ( cluster02 ) due to node availability
* Stop dummy ( cluster02 ) due to node availability
* Stop httpd-bundle-ip-192.168.122.132 ( cluster02 ) due to node availability
* Stop httpd-bundle-docker-1 ( cluster02 ) due to node availability
* Stop httpd-bundle-1 ( cluster02 ) due to unrunnable httpd-bundle-docker-1 start
* Stop httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-docker-1 start
* Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Move Public-IP ( cluster02 -> cluster01 )
* Move Email ( cluster02 -> cluster01 )
* Stop mysql-proxy:0 ( cluster02 ) due to node availability
* Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability
Executing Cluster Transition:
* Pseudo action: httpd-bundle-1_stop_0
* Pseudo action: promotable-clone_demote_0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Fencing cluster02 (reboot)
* Pseudo action: ping-clone_stop_0
* Pseudo action: dummy_stop_0
* Pseudo action: httpd-bundle-docker-1_stop_0
* Pseudo action: exim-group_stop_0
* Pseudo action: Email_stop_0
* Pseudo action: mysql-clone-group_stop_0
* Pseudo action: promotable-rsc_demote_0
* Pseudo action: promotable-clone_demoted_0
* Pseudo action: promotable-clone_stop_0
* Pseudo action: stonith-httpd-bundle-1-off on httpd-bundle-1
* Pseudo action: ping_stop_0
* Pseudo action: ping-clone_stopped_0
* Pseudo action: httpd-bundle-clone_stop_0
* Pseudo action: httpd-bundle-ip-192.168.122.132_stop_0
* Pseudo action: Public-IP_stop_0
* Pseudo action: mysql-group:0_stop_0
* Pseudo action: mysql-proxy_stop_0
* Pseudo action: promotable-rsc_stop_0
* Pseudo action: promotable-clone_stopped_0
* Pseudo action: httpd_stop_0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Pseudo action: exim-group_stopped_0
* Pseudo action: exim-group_start_0
* Resource action: Public-IP start on cluster01
* Resource action: Email start on cluster01
* Pseudo action: mysql-group:0_stopped_0
* Pseudo action: mysql-clone-group_stopped_0
* Pseudo action: httpd-bundle_stopped_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: exim-group_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster01 ]
* OFFLINE: [ cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 ]
+ * GuestOnline: [ httpd-bundle-0 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Stopped
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster01
* Email (lsb:exim): Started cluster01
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Unpromoted: [ cluster01 ]
* Stopped: [ cluster02 ]
=#=#=#= End test: Simulate a node failing - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate a node failing
=#=#=#= Begin test: List a promotable clone resource =#=#=#=
resource promotable-clone is running on: cluster01
resource promotable-clone is running on: cluster02 Promoted
=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource
=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
resource promotable-rsc is running on: cluster01
resource promotable-rsc is running on: cluster02 Promoted
=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource
=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
resource promotable-rsc:0 is running on: cluster02 Promoted
=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource
=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
resource promotable-rsc:1 is running on: cluster01
=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource
=#=#=#= Begin test: List a promotable clone resource in XML =#=#=#=
cluster01
cluster02
=#=#=#= End test: List a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource in XML
=#=#=#= Begin test: List the primitive of a promotable clone resource in XML =#=#=#=
cluster01
cluster02
=#=#=#= End test: List the primitive of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource in XML
=#=#=#= Begin test: List a single instance of a promotable clone resource in XML =#=#=#=
cluster02
=#=#=#= End test: List a single instance of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource in XML
=#=#=#= Begin test: List another instance of a promotable clone resource in XML =#=#=#=
cluster01
=#=#=#= End test: List another instance of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource in XML
=#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#=
crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0'
Error performing operation: Invalid parameter
=#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#=
* Passed: crm_resource - Try to move an instance of a cloned resource
=#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute
=#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute
=#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute
=#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=1
=#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute
=#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=5
=#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute
=#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute (XML)
=#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
=#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute
=#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute
=#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML)
=#=#=#= Begin test: Check that CIB_file="-" works - crm_mon =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Check that CIB_file="-" works - crm_mon - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_mon
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource =#=#=#=
=#=#=#= End test: Check that CIB_file="-" works - crm_resource - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_resource
=#=#=#= Begin test: Check that CIB_file="-" works - crmadmin =#=#=#=
11
=#=#=#= End test: Check that CIB_file="-" works - crmadmin - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crmadmin
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index ef9d7824b3..a725d6cd8e 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,2522 +1,2536 @@
#!@BASH_PATH@
#
# Copyright 2008-2022 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
# Set the exit status of a pipeline to the exit code of the last program to
# exit non-zero. This is bash-specific.
set -o pipefail
#
# Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
# compatible functionality. Do not use the -i option, alternation (\|),
# \0, or character sequences such as \n or \s.
#
USAGE_TEXT="Usage: cts-cli [<options>]
Options:
--help Display this text, then exit
-V, --verbose Display any differences from expected output
-t 'TEST [...]' Run only specified tests
(default: 'dates error_codes tools crm_mon acls validity
upgrade rules feature_set').
Other tests: agents (must be run in an installed environment).
-p DIR Look for executables in DIR (may be specified multiple times)
-v, --valgrind Run all commands under valgrind
-s Save actual output as expected output"
# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
test_home="$(dirname "$(readlink -e "$0")")"
else
test_home="$(dirname "$0")"
fi
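# Default shadow CIB name and a private temporary directory for shadow CIB files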
: ${shadow=cts-cli}
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
num_errors=0
num_passed=0
verbose=0
tests="dates error_codes tools crm_mon acls validity upgrade rules feature_set"
do_save=0
XMLLINT_CMD=
VALGRIND_CMD=
VALGRIND_OPTS="
-q
--gen-suppressions=all
--show-reachable=no
--leak-check=full
--trace-children=no
--time-stamp=yes
--num-callers=20
--suppressions=$test_home/valgrind-pcmk.suppressions
"
# Temp files for saving a command's stdout/stderr in _test_assert()
test_assert_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_outfile.XXXXXXXXXX)
test_assert_errfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_errfile.XXXXXXXXXX)
xmllint_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.xmllint_outfile.XXXXXXXXXX)
# Log test errors to stderr
export PCMK_stderr=1
# Output when PCMK_trace_functions is undefined is different from when it's
# empty. Later we save the value of PCMK_trace_functions, do work, and restore
# the original value. Getting back to the initial state is simplest if we assume
# the variable is defined.
: ${PCMK_trace_functions=""}
export PCMK_trace_functions
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_NOT_CONFIGURED=6
CRM_EX_USAGE=64
CRM_EX_DATAERR=65
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111
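# Reset the shadow CIB's admin_epoch/epoch/num_updates so expected output does
# not depend on how many CIB modifications preceded a test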
reset_shadow_cib_version() {
local SHADOWPATH
SHADOWPATH="$(crm_shadow --file)"
# sed -i isn't portable :-(
cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # preserve permissions
sed -e 's/epoch="[0-9]*"/epoch="1"/g' \
-e 's/num_updates="[0-9]*"/num_updates="0"/g' \
-e 's/admin_epoch="[0-9]*"/admin_epoch="0"/g' \
"$SHADOWPATH" > "${SHADOWPATH}.$$"
mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
}
# A newly created empty CIB might or might not have a rsc_defaults section
# depending on whether the --with-resource-stickiness-default configure
# option was used. To ensure regression tests behave the same either way,
# delete any rsc_defaults after creating or erasing a CIB.
delete_shadow_resource_defaults() {
cibadmin --delete --xml-text '<rsc_defaults/>'
# The above command might or might not bump the CIB version, so reset it
# to ensure future changes result in the same version for comparison.
reset_shadow_cib_version
}
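# Create a fresh empty shadow CIB, optionally validating with a given schema
# version, and point subsequent commands at it via CIB_shadow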
create_shadow_cib() {
local VALIDATE_WITH
local SHADOW_CMD
VALIDATE_WITH="$1"
export CIB_shadow_dir="${shadow_dir}"
SHADOW_CMD="$VALGRIND_CMD crm_shadow --batch --force --create-empty"
if [ -z "$VALIDATE_WITH" ]; then
$SHADOW_CMD "$shadow" 2>&1
else
$SHADOW_CMD "$shadow" --validate-with="${VALIDATE_WITH}" 2>&1
fi
export CIB_shadow="$shadow"
delete_shadow_resource_defaults
}
function _test_assert() {
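# $1 = expected exit code, $2 = whether to validate XML output against the API
# schema, $3 = whether to dump the CIB after the command; the command and its
# description come from the globals $cmd and $desc set by the caller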
target=$1; shift
validate=$1; shift
cib=$1; shift
app=`echo "$cmd" | sed 's/\ .*//'`
printf "* Running: $app - $desc\n" 1>&2
printf "=#=#=#= Begin test: $desc =#=#=#=\n"
# Capture stderr and stdout separately, then print them consecutively
eval $VALGRIND_CMD $cmd > "$test_assert_outfile" 2> "$test_assert_errfile"
rc=$?
cat "$test_assert_errfile"
cat "$test_assert_outfile"
if [ x$cib != x0 ]; then
printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
CIB_user=root cibadmin -Q
fi
# Do not validate if running under valgrind, even if told to do so. Valgrind
# will output a lot more stuff that is not XML, so it wouldn't validate anyway.
if [ "$validate" = "1" ] && [ "$VALGRIND_CMD" = "" ] && [ $rc = 0 ] && [ "$XMLLINT_CMD" != "" ]; then
# The sed command filters out the "- validates" line that xmllint will output
# on success. grep cannot be used here because "grep -v 'validates$'" will
# return an exit code of 1 if its input consists entirely of "- validates".
$XMLLINT_CMD --noout --relaxng \
"$PCMK_schema_directory/api/api-result.rng" "$test_assert_outfile" \
> "$xmllint_outfile" 2>&1
rc=$?
sed -n '/validates$/ !p' "$xmllint_outfile"
if [ $rc = 0 ]; then
printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
else
printf "=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=\n" "$desc" $rc
fi
else
printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
fi
if [ $rc -ne $target ]; then
num_errors=$(( $num_errors + 1 ))
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
return
exit $CRM_EX_ERROR
else
printf "* Passed: %-14s - %s\n" $app "$desc"
num_passed=$(( $num_passed + 1 ))
fi
}
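# Convenience wrappers: test_assert() skips XML schema validation,
# test_assert_validate() enables it; both pass through the expected exit code
# and the optional CIB-dump flag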
function test_assert() {
_test_assert $1 0 $2
}
function test_assert_validate() {
_test_assert $1 1 $2
}
# Tests that depend on resource agents and must be run in an installed
# environment
function test_agents() {
desc="Validate a valid resource configuration"
cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
test_assert $CRM_EX_OK 0
desc="Validate a valid resource configuration (XML)"
cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
cmd="$cmd --output-as=xml"
test_assert_validate $CRM_EX_OK 0
# Make the Dummy configuration invalid (op_sleep can't be a generic string)
export OCF_RESKEY_op_sleep=asdf
desc="Validate an invalid resource configuration"
cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
test_assert $CRM_EX_NOT_CONFIGURED 0
desc="Validate an invalid resource configuration (XML)"
cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
cmd="$cmd --output-as=xml"
test_assert_validate $CRM_EX_NOT_CONFIGURED 0
unset OCF_RESKEY_op_sleep
export OCF_RESKEY_op_sleep
}
function test_crm_mon() {
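# These tests point crm_mon at static CIB files under cli/ via CIB_file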
local TMPXML
export CIB_file="$test_home/cli/crm_mon.xml"
desc="Basic text output"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output"
cmd="crm_mon --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output without node section"
cmd="crm_mon -1 --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="XML output without the node section"
cmd="crm_mon --output-as=xml --exclude=nodes"
test_assert_validate $CRM_EX_OK 0
desc="Text output with only the node section"
cmd="crm_mon -1 --exclude=all --include=nodes"
test_assert $CRM_EX_OK 0
# The above test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
desc="Complete text output"
cmd="crm_mon -1 --include=all"
test_assert $CRM_EX_OK 0
# XML includes everything already so there's no need for a complete test
desc="Complete text output with detail"
cmd="crm_mon -1R --include=all"
test_assert $CRM_EX_OK 0
# XML includes detailed output already
desc="Complete brief text output"
cmd="crm_mon -1 --include=all --brief"
test_assert $CRM_EX_OK 0
desc="Complete text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="XML output grouped by node"
cmd="crm_mon -1 --output-as=xml --group-by-node"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by node"
cmd="crm_mon -1 --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node"
cmd="crm_mon --output-as xml --include=all --node=cluster01"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by tag"
cmd="crm_mon -1 --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="XML output filtered by tag"
cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by resource tag"
cmd="crm_mon -1 --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource tag"
cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output filtered by node that doesn't exist"
cmd="crm_mon -1 --node=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node that doesn't exist"
cmd="crm_mon --output-as=xml --node=blah"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Basic text output with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster02"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete text output filtered by primitive resource"
cmd="crm_mon -1 --include=all --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="XML output filtered by primitive resource"
cmd="crm_mon --output-as=xml --resource=Fencing"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by group resource"
cmd="crm_mon -1 --include=all --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource"
cmd="crm_mon --output-as=xml --resource=exim-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by group resource member"
cmd="crm_mon -1 --include=all --resource=Public-IP"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource member"
cmd="crm_mon --output-as=xml --resource=Email"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by clone resource"
cmd="crm_mon -1 --include=all --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource"
cmd="crm_mon --output-as=xml --resource=ping-clone"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by clone resource instance"
cmd="crm_mon -1 --include=all --resource=ping"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by exact clone resource instance"
cmd="crm_mon -1 --include=all --show-detail --resource=ping:0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by exact clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping:1"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output filtered by resource that doesn't exist"
cmd="crm_mon -1 --resource=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource that doesn't exist"
cmd="crm_mon --output-as=xml --resource=blah"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by tag"
cmd="crm_mon -1 -r --resource=inactive-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle resource"
cmd="crm_mon -1 -r --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="XML output filtered by inactive bundle resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled IP address resource"
cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled IP address resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled container"
cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled container"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle connection"
cmd="crm_mon -1 -r --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundle connection"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-0"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled primitive resource"
cmd="crm_mon -1 -r --resource=httpd"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled primitive resource"
cmd="crm_mon --output-as=xml --resource=httpd"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by clone name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by clone name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-clone-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by group name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by group name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by exact group instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact group instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group:1"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by primitive name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by primitive name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy:1"
test_assert_validate $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-partial.xml"
desc="Text output of partially active resources"
cmd="crm_mon -1 --show-detail"
test_assert $CRM_EX_OK 0
desc="XML output of partially active resources"
cmd="crm_mon -1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources"
cmd="crm_mon -1 -r --show-detail"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete brief text output, with inactive resources"
cmd="crm_mon -1 -r --include=all --brief --show-detail"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Text output of partially active group"
cmd="crm_mon -1 --resource=partially-active-group"
test_assert $CRM_EX_OK 0
desc="Text output of partially active group, with inactive resources"
cmd="crm_mon -1 --resource=partially-active-group -r"
test_assert $CRM_EX_OK 0
desc="Text output of active member of partially active group"
cmd="crm_mon -1 --resource=dummy-1"
test_assert $CRM_EX_OK 0
desc="Text output of inactive member of partially active group"
cmd="crm_mon -1 --resource=dummy-2 --show-detail"
test_assert $CRM_EX_OK 0
desc="Complete brief text output grouped by node, with inactive resources"
cmd="crm_mon -1 -r --include=all --group-by-node --brief --show-detail"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, filtered by node"
cmd="crm_mon -1 --output-as=xml --node=cluster01"
test_assert_validate $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-unmanaged.xml"
desc="Text output of active unmanaged resource on offline node"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output of active unmanaged resource on offline node"
cmd="crm_mon -1 --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Brief text output of active unmanaged resource on offline node"
cmd="crm_mon -1 --brief"
test_assert $CRM_EX_OK 0
desc="Brief text output of active unmanaged resource on offline node, grouped by node"
cmd="crm_mon -1 --brief --group-by-node"
test_assert $CRM_EX_OK 0
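# Make a temporary copy of crm_mon.xml with maintenance-mode flipped to true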
export CIB_file=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_mon.xml.XXXXXXXXXX)
sed -e '/maintenance-mode/ s/false/true/' "$test_home/cli/crm_mon.xml" > $CIB_file
desc="Text output of all resources with maintenance-mode enabled"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
rm -r "$CIB_file"
unset CIB_file
+
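+ # New tests: a guest node's container active on a different node from its
+ # remote connection resource (see crm_mon-T180.xml)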
+ export CIB_file="$test_home/cli/crm_mon-T180.xml"
+
+ desc="Text output of guest node's container on different node from its"
+ desc="$desc remote resource"
+ cmd="crm_mon -1"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output of guest node's container on different node from"
+ desc="$desc its remote resource"
+ cmd="crm_mon -1 --show-detail"
+ test_assert $CRM_EX_OK 0
+
+ unset CIB_file
}
function test_error_codes() {
# Note: At the time of this writing, crm_error returns success even for
# unknown error codes. We don't want to cause a regression by changing that.
# Due to the way _test_assert() formats output, we need "crm_error" to be
# the first token of cmd. We can't start with a parenthesis or variable
# assignment. However, in the "list result codes" tests, we also need to
# save some output for later processing. We'll use a temp file for this.
local TMPFILE
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_error_out.XXXXXXXXXX)
# Legacy return codes
#
# Don't test unknown legacy code. FreeBSD includes a colon in strerror(),
# while other distros do not.
desc="Get legacy return code"
cmd="crm_error -- 201"
test_assert $CRM_EX_OK 0
desc="Get legacy return code (XML)"
cmd="crm_error --output-as=xml -- 201"
test_assert_validate $CRM_EX_OK 0
desc="Get legacy return code (with name)"
cmd="crm_error -n -- 201"
test_assert $CRM_EX_OK 0
desc="Get legacy return code (with name) (XML)"
cmd="crm_error -n --output-as=xml -- 201"
test_assert_validate $CRM_EX_OK 0
desc="Get multiple legacy return codes"
cmd="crm_error -- 201 202"
test_assert $CRM_EX_OK 0
desc="Get multiple legacy return codes (XML)"
cmd="crm_error --output-as=xml -- 201 202"
test_assert_validate $CRM_EX_OK 0
desc="Get multiple legacy return codes (with names)"
cmd="crm_error -n -- 201 202"
test_assert $CRM_EX_OK 0
desc="Get multiple legacy return codes (with names) (XML)"
cmd="crm_error -n --output-as=xml -- 201 202"
test_assert_validate $CRM_EX_OK 0
# We can only rely on our custom codes, so we'll spot-check codes 201-209
desc="List legacy return codes (spot check)"
cmd="crm_error -l | grep 20[1-9]"
test_assert $CRM_EX_OK 0
desc="List legacy return codes (spot check) (XML)"
cmd="crm_error -l --output-as=xml > $TMPFILE; rc=$?"
cmd="$cmd; grep -Ev ' "$TMPORIG"
desc="Set cluster option"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Query new cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Query cluster options"
cmd="cibadmin -Q -o crm_config > $TMPXML"
test_assert $CRM_EX_OK
desc="Set no-quorum policy"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="Delete nvpair"
cmd="cibadmin -D -o crm_config --xml-text ''"
test_assert $CRM_EX_OK
desc="Create operation should fail"
cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_EXISTS
desc="Modify cluster options section"
cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Query updated cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Set duplicate cluster option"
cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
test_assert $CRM_EX_OK
desc="Setting multiply defined cluster option should fail"
cmd="crm_attribute -n cluster-delay -v 30s"
test_assert $CRM_EX_MULTIPLE
desc="Set cluster option with -s"
cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
test_assert $CRM_EX_OK
desc="Delete cluster option with -i"
cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Create node1 and bring it online"
cmd="crm_simulate --live-check --in-place --node-up=node1"
test_assert $CRM_EX_OK
desc="Create node attribute"
cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
test_assert $CRM_EX_OK
desc="Query new node attribute"
cmd="cibadmin -Q -o nodes | grep node1-ram"
test_assert $CRM_EX_OK
desc="Set a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query a fail count"
cmd="crm_failcount --query -r foo -N node1"
test_assert $CRM_EX_OK
desc="Show node attributes with crm_simulate"
cmd="crm_simulate --live-check --show-attrs"
test_assert $CRM_EX_OK 0
desc="Set a second transient node attribute"
cmd="crm_attribute -n fail-count-bar -v 5 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query node attributes by pattern"
cmd="crm_attribute -t status -P fail-count -N node1 --query"
test_assert $CRM_EX_OK 0
desc="Update node attributes by pattern"
cmd="crm_attribute -t status -P fail-count -N node1 -v 10"
test_assert $CRM_EX_OK
desc="Delete node attributes by pattern"
cmd="crm_attribute -t status -P fail-count -N node1 -D"
test_assert $CRM_EX_OK
desc="crm_attribute given invalid pattern usage"
cmd="crm_attribute -t nodes -P fail-count -N node1 -D"
test_assert $CRM_EX_USAGE 0
desc="crm_attribute given invalid delete usage"
cmd="crm_attribute -t nodes -N node1 -D"
test_assert $CRM_EX_USAGE 0
desc="Digest calculation"
cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
test_assert $CRM_EX_OK
# This update will fail because it has version numbers
desc="Replace operation should fail"
cmd="cibadmin -R --xml-file $TMPORIG"
test_assert $CRM_EX_OLD
desc="Default standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Set standby status"
cmd="crm_standby -N node1 -v true"
test_assert $CRM_EX_OK
desc="Query standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Delete standby value"
cmd="crm_standby -N node1 -D"
test_assert $CRM_EX_OK
desc="Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="crm_resource run with extra arguments"
cmd="crm_resource foo bar"
test_assert $CRM_EX_USAGE 0
desc="crm_resource given both -r and resource config"
cmd="crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy"
test_assert $CRM_EX_USAGE 0
desc="crm_resource given resource config with invalid action"
cmd="crm_resource --class ocf --provider pacemaker --agent Dummy -D"
test_assert $CRM_EX_USAGE 0
desc="Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g is-managed"
test_assert $CRM_EX_OK
desc="Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create another resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Show why a resource is not running"
cmd="crm_resource -Y -r dummy --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Remove another resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Create a resource attribute"
cmd="crm_resource -r dummy -p delay -v 10s"
test_assert $CRM_EX_OK
desc="List the configured resources"
cmd="crm_resource -L"
test_assert $CRM_EX_OK
desc="List the configured resources in XML"
cmd="crm_resource -L --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Implicitly list the configured resources"
cmd="crm_resource"
test_assert $CRM_EX_OK 0
desc="List IDs of instantiated resources"
cmd="crm_resource -l"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource"
cmd="crm_resource -q -r dummy"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource, output as XML"
cmd="crm_resource -q -r dummy --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Require a destination when migrating a resource that is stopped"
cmd="crm_resource -r dummy -M"
test_assert $CRM_EX_USAGE
desc="Don't support migration to non-existent locations"
cmd="crm_resource -r dummy -M -N i.do.not.exist"
test_assert $CRM_EX_NOSUCH
desc="Create a fencing resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Bring resources online"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Try to move a resource to its existing location"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_EXISTS
desc="Try to move a resource that doesn't exist"
cmd="crm_resource -r xyz --move --node node1"
test_assert $CRM_EX_NOSUCH 0
desc="Move a resource from its existing location"
cmd="crm_resource -r dummy --move"
test_assert $CRM_EX_OK
desc="Clear out constraints generated by --move"
cmd="crm_resource -r dummy --clear"
test_assert $CRM_EX_OK
desc="Default ticket granted state"
cmd="crm_ticket -t ticketA -G granted -d false"
test_assert $CRM_EX_OK
desc="Set ticket granted state"
cmd="crm_ticket -t ticketA -r --force"
test_assert $CRM_EX_OK
desc="Query ticket granted state"
cmd="crm_ticket -t ticketA -G granted"
test_assert $CRM_EX_OK
desc="Delete ticket granted state"
cmd="crm_ticket -t ticketA -D granted --force"
test_assert $CRM_EX_OK
desc="Make a ticket standby"
cmd="crm_ticket -t ticketA -s"
test_assert $CRM_EX_OK
desc="Query ticket standby state"
cmd="crm_ticket -t ticketA -G standby"
test_assert $CRM_EX_OK
desc="Activate a ticket"
cmd="crm_ticket -t ticketA -a"
test_assert $CRM_EX_OK
desc="Delete ticket standby state"
cmd="crm_ticket -t ticketA -D standby"
test_assert $CRM_EX_OK
desc="Ban a resource on unknown node"
cmd="crm_resource -r dummy -B -N host1"
test_assert $CRM_EX_NOSUCH
desc="Create two more nodes and bring them online"
cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
test_assert $CRM_EX_OK
desc="Ban dummy from node1"
cmd="crm_resource -r dummy -B -N node1"
test_assert $CRM_EX_OK
desc="Show where a resource is running"
cmd="crm_resource -r dummy -W"
test_assert $CRM_EX_OK 0
desc="Show constraints on a resource"
cmd="crm_resource -a -r dummy"
test_assert $CRM_EX_OK 0
desc="Ban dummy from node2"
cmd="crm_resource -r dummy -B -N node2 --output-as=xml"
test_assert_validate $CRM_EX_OK
desc="Relocate resources due to ban"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Move dummy to node1"
cmd="crm_resource -r dummy -M -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK
desc="Clear implicit constraints for dummy on node2"
cmd="crm_resource -r dummy -U -N node2"
test_assert $CRM_EX_OK
desc="Drop the status section"
cmd="cibadmin -R -o status --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a clone"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a resource meta attribute"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates (force clone)"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Update child resource meta attribute with duplicates"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute in parent"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update existing resource meta attribute"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the parent"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Copy resources"
cmd="cibadmin -Q -o resources > $TMPXML"
test_assert $CRM_EX_OK 0
desc="Delete resource parent meta attribute (force)"
cmd="crm_resource -r test-clone --meta -d is-managed --force"
test_assert $CRM_EX_OK
desc="Restore duplicates"
cmd="cibadmin -R -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Delete resource child meta attribute"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
cibadmin -C -o resources --xml-text ' \
\
\
'
desc="Create a resource meta attribute in dummy1"
cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in dummy-group"
cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
test_assert $CRM_EX_OK
cibadmin -D -o resource --xml-text ''
desc="Specify a lifetime when moving a resource"
cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
test_assert $CRM_EX_OK
desc="Try to move a resource previously moved with a lifetime"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_OK
desc="Ban dummy from node1 for a short time"
cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
test_assert $CRM_EX_OK
desc="Remove expired constraints"
sleep 2
cmd="crm_resource --clear --expired"
test_assert $CRM_EX_OK
# Clear has already been tested elsewhere, but we need to get rid of the
# constraints so testing delete works. It won't delete if there's still
# a reference to the resource somewhere.
desc="Clear all implicit constraints for dummy"
cmd="crm_resource -r dummy -U"
test_assert $CRM_EX_OK
desc="Set a node health strategy"
cmd="crm_attribute -n node-health-strategy -v migrate-on-red"
test_assert $CRM_EX_OK
desc="Set a node health attribute"
cmd="crm_attribute -N node3 -n '#health-cts-cli' -v red"
test_assert $CRM_EX_OK
desc="Show why a resource is not running on an unhealthy node"
cmd="crm_resource -N node3 -Y -r dummy --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Delete a resource"
cmd="crm_resource -D -r dummy -t primitive"
test_assert $CRM_EX_OK
unset CIB_shadow
unset CIB_shadow_dir
desc="Create an XML patchset"
cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
test_assert $CRM_EX_ERROR 0
export CIB_file="$test_home/cli/constraints.xml"
for rsc in prim1 prim2 prim3 prim4 prim5 prim6 prim7 prim8 prim9 \
prim10 prim11 prim12 prim13 group clone; do
desc="Check locations and constraints for $rsc"
cmd="crm_resource -a -r $rsc"
test_assert $CRM_EX_OK 0
desc="Recursively check locations and constraints for $rsc"
cmd="crm_resource -A -r $rsc"
test_assert $CRM_EX_OK 0
desc="Check locations and constraints for $rsc in XML"
cmd="crm_resource -a -r $rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Recursively check locations and constraints for $rsc in XML"
cmd="crm_resource -A -r $rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
done
unset CIB_file
export CIB_file="$test_home/cli/crm_resource_digests.xml"
desc="Show resource digests"
cmd="crm_resource --digests -r rsc1 -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Show resource digests with overrides"
cmd="$cmd CRM_meta_interval=10000 CRM_meta_timeout=20000"
test_assert $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crmadmin-cluster-remote-guest-nodes.xml"
desc="List all nodes"
cmd="crmadmin -N"
test_assert $CRM_EX_OK 0
desc="Minimally list all nodes"
cmd="crmadmin -N -q"
test_assert $CRM_EX_OK 0
desc="List all nodes as bash exports"
cmd="crmadmin -N -B"
test_assert $CRM_EX_OK 0
desc="List cluster nodes"
cmd="crmadmin -N cluster | wc -l | grep 6"
test_assert $CRM_EX_OK 0
desc="List guest nodes"
cmd="crmadmin -N guest | wc -l | grep 2"
test_assert $CRM_EX_OK 0
desc="List remote nodes"
cmd="crmadmin -N remote | wc -l | grep 3"
test_assert $CRM_EX_OK 0
desc="List cluster,remote nodes"
cmd="crmadmin -N cluster,remote | wc -l | grep 9"
test_assert $CRM_EX_OK 0
desc="List guest,remote nodes"
cmd="crmadmin -N guest,remote | wc -l | grep 5"
test_assert $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon.xml"
export CIB_shadow_dir="${shadow_dir}"
desc="Show allocation scores with crm_simulate"
cmd="crm_simulate -x $CIB_file --show-scores --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Show utilization with crm_simulate"
cmd="crm_simulate -x $CIB_file --show-utilization"
test_assert $CRM_EX_OK 0
desc="Simulate injecting a failure"
cmd="crm_simulate -x $CIB_file -S -i ping_monitor_10000@cluster02=1"
test_assert $CRM_EX_OK 0
desc="Simulate bringing a node down"
cmd="crm_simulate -x $CIB_file -S --node-down=cluster01"
test_assert $CRM_EX_OK 0
desc="Simulate a node failing"
cmd="crm_simulate -x $CIB_file -S --node-fail=cluster02"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
desc="List a promotable clone resource"
cmd="crm_resource --locate -r promotable-clone"
test_assert $CRM_EX_OK 0
desc="List the primitive of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc"
test_assert $CRM_EX_OK 0
desc="List a single instance of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc:0"
test_assert $CRM_EX_OK 0
desc="List another instance of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc:1"
test_assert $CRM_EX_OK 0
desc="List a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-clone --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List the primitive of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List a single instance of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc:0 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List another instance of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc:1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Try to move an instance of a cloned resource"
cmd="crm_resource -r promotable-rsc:0 --move --node node1"
test_assert $CRM_EX_INVALID_PARAM 0
# Create a sandbox copy of crm_mon.xml
cibadmin -Q > "$TMPXML"
export CIB_file="$TMPXML"
desc="Query a nonexistent promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G"
test_assert $CRM_EX_NOSUCH 0
desc="Query a nonexistent promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml"
test_assert_validate $CRM_EX_NOSUCH 0
desc="Delete a nonexistent promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -D"
test_assert $CRM_EX_OK 0
desc="Delete a nonexistent promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Query after deleting a nonexistent promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G"
test_assert $CRM_EX_NOSUCH 0
desc="Query after deleting a nonexistent promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml"
test_assert_validate $CRM_EX_NOSUCH 0
desc="Update a nonexistent promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1"
test_assert $CRM_EX_OK 0
desc="Update a nonexistent promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Query after updating a nonexistent promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G"
test_assert $CRM_EX_OK 0
desc="Query after updating a nonexistent promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Update an existing promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5"
test_assert $CRM_EX_OK 0
desc="Update an existing promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Query after updating an existing promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G"
test_assert $CRM_EX_OK 0
desc="Query after updating an existing promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Delete an existing promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -D"
test_assert $CRM_EX_OK 0
desc="Delete an existing promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Query after deleting an existing promotable score attribute"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G"
test_assert $CRM_EX_NOSUCH 0
desc="Query after deleting an existing promotable score attribute (XML)"
cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml"
test_assert_validate $CRM_EX_NOSUCH 0
unset CIB_file
export CIB_file="-"
desc="Check that CIB_file=\"-\" works - crm_mon"
cmd="cat $test_home/cli/crm_mon.xml | crm_mon -1"
test_assert $CRM_EX_OK 0
desc="Check that CIB_file=\"-\" works - crm_resource"
cmd="cat $test_home/cli/crm_resource_digests.xml | crm_resource --digests -r rsc1 -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Check that CIB_file=\"-\" works - crmadmin"
cmd="cat $test_home/cli/crmadmin-cluster-remote-guest-nodes.xml | crmadmin -N | wc -l | grep 11"
test_assert $CRM_EX_OK 0
unset CIB_file
rm -f "$TMPXML" "$TMPORIG"
}
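# ISO 8601 period specifications that iso8601 is expected to reject (used by
# test_dates below)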
INVALID_PERIODS=(
"2019-01-01 00:00:00Z" # Start with no end
"2019-01-01 00:00:00Z/" # Start with only a trailing slash
"PT2S/P1M" # Two durations
"2019-13-01 00:00:00Z/P1M" # Out-of-range month
"20191077T15/P1M" # Out-of-range day
"2019-10-01T25:00:00Z/P1M" # Out-of-range hour
"2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z" # Out-of-range minute
"2019-10-01 00:00:80Z/P1M" # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour
"20191001T000010 -00:61/P1M" # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P" # Duration with no values
"P1Z/2019-02-20 00:00:00Z" # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z" # No number for duration unit
)
function test_dates() {
# Ensure invalid period specifications are rejected
for spec in '' "${INVALID_PERIODS[@]}"; do
desc="Invalid period - [$spec]"
cmd="iso8601 -p \"$spec\""
test_assert $CRM_EX_INVALID_PARAM 0
done
desc="2014-01-01 00:30:00 - 1 Hour"
cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - Feb 29 in leap year"
cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - using 'T' and offset"
cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
test_assert $CRM_EX_OK 0
desc="24:00:00 equivalent to 00:00:00 of next day"
cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
desc="20$y-W01-7"
cmd="iso8601 -d '20$y-W01-7 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-7 - round-trip"
cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1"
cmd="iso8601 -d '20$y-W01-1 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1 - round-trip"
cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
test_assert $CRM_EX_OK 0
done
desc="2009-W53-07"
cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="epoch + 2 Years 5 Months 6 Minutes"
cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 1 Month"
cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 2 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 3 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-03-31 - 1 Month"
cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2038-01-01 + 3 Months"
cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
}
function test_acl_loop() {
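# $1 is a scratch file used to hold CIB snapshots during the ACL checks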
local TMPXML
TMPXML="$1"
# Make sure we're rejecting things for the right reasons
orig_trace_fns="$PCMK_trace_functions"
export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
CIB_user=root cibadmin --replace --xml-text ''
### no ACL ###
export CIB_user=unknownguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
export CIB_user=root
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v true"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Started"
test_assert $CRM_EX_OK
### read //meta_attributes ###
export CIB_user=badidea
desc="$CIB_user: Query configuration - implied deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
### deny /cib, read //meta_attributes ###
export CIB_user=betteridea
desc="$CIB_user: Query configuration - explicit deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Replace - remove acls"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create resource"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### admin role ###
CIB_user=bob
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### super_user role ###
export CIB_user=joe
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_writer role ###
export CIB_user=mike
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_denied role ###
export CIB_user=chris
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
export PCMK_trace_functions="$orig_trace_fns"
}
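# Configure ACLs in a shadow CIB, then run the ACL checks against both the original and upgraded schema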
function test_acls() {
local SHADOWPATH
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-1.3
cat < "$TMPXML"
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="New ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Another ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Updated ACL"
cmd="cibadmin --replace -o acls --xml-text ''"
test_assert $CRM_EX_OK
test_acl_loop "$TMPXML"
printf "\n\n !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
export CIB_user=root
desc="$CIB_user: Upgrade to latest CIB schema"
cmd="cibadmin --upgrade --force -V"
test_assert $CRM_EX_OK
reset_shadow_cib_version
test_acl_loop "$TMPXML"
unset CIB_shadow_dir
rm -f "$TMPXML"
}
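# Verify that schema violations are detected by cibadmin and handled sanely by crm_simulate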
function test_validity() {
local TMPGOOD
local TMPBAD
TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-1.2
orig_trace_fns="$PCMK_trace_functions"
export PCMK_trace_functions=apply_upgrade,update_validation
cibadmin -C -o resources --xml-text ''
cibadmin -C -o resources --xml-text ''
cibadmin -C -o constraints --xml-text ''
cibadmin -Q > "$TMPGOOD"
desc="Try to make resulting CIB invalid (enum violation)"
cmd="cibadmin -M -o constraints --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (enum violation)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid (unrecognized validate-with)"
cmd="cibadmin -M --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
cmd="cibadmin -C -o configuration --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|||' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB valid, although without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with valid CIB, but without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# this will just disable validation and accept the config, outputting
# validation errors
sed -e 's|[ ][ ]*validate-with="[^"]*"||' \
-e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
"$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB invalid, and without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPGOOD" "$TMPBAD"
export PCMK_trace_functions="$orig_trace_fns"
}
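# Verify that upgrading to the latest CIB schema preserves resource instance attributes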
test_upgrade() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-2.10
orig_trace_fns="$PCMK_trace_functions"
export PCMK_trace_functions=apply_upgrade,update_validation
desc="Set stonith-enabled=false"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
cat < "$TMPXML"
EOF
desc="Configure the initial resource"
cmd="cibadmin -M -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
cmd="cibadmin --upgrade --force -V -V"
test_assert $CRM_EX_OK
desc="Query a resource instance attribute (shall survive)"
cmd="crm_resource -r mySmartFuse -g requires"
test_assert $CRM_EX_OK
unset CIB_shadow_dir
rm -f "$TMPXML"
export PCMK_trace_functions="$orig_trace_fns"
}
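# Exercise crm_rule against valid, expired, not-yet-in-effect, and unsupported rules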
test_rules() {
local TMPXML
create_shadow_cib
cibadmin -C -o crm_config --xml-text ''
cibadmin -C -o resources --xml-text ''
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
if [ "$(uname)" == "FreeBSD" ]; then
tomorrow=$(date -v+1d +"%F %T %z")
else
tomorrow=$(date --date=tomorrow +"%F %T %z")
fi
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
desc="crm_rule given no arguments"
cmd="crm_rule"
test_assert $CRM_EX_USAGE 0
desc="crm_rule given no arguments (XML)"
cmd="crm_rule --output-as=xml"
test_assert_validate $CRM_EX_USAGE 0
desc="crm_rule given no rule to check"
cmd="crm_rule -c"
test_assert $CRM_EX_USAGE 0
desc="crm_rule given no rule to check (XML)"
cmd="crm_rule -c --output-as=xml"
test_assert_validate $CRM_EX_USAGE 0
desc="crm_rule given invalid input XML"
cmd="crm_rule -c -r blahblah -X 'invalidxml'"
test_assert $CRM_EX_DATAERR 0
desc="crm_rule given invalid input XML (XML)"
cmd="crm_rule -c -r blahblah -X 'invalidxml' --output-as=xml"
test_assert_validate $CRM_EX_DATAERR 0
desc="crm_rule given invalid input XML on stdin"
cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X -"
test_assert $CRM_EX_DATAERR 0
desc="crm_rule given invalid input XML on stdin (XML)"
cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X - --output-as=xml"
test_assert_validate $CRM_EX_DATAERR 0
desc="Try to check a rule that doesn't exist"
cmd="crm_rule -c -r blahblah"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule that doesn't exist, with XML output"
cmd="crm_rule -c -r blahblah --output-as=xml"
test_assert_validate $CRM_EX_NOSUCH 0
desc="Try to check a rule that has too many date_expressions"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule that has too many date_expressions (XML)"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions --output-as=xml"
test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Verify basic rule is expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
test_assert $CRM_EX_EXPIRED 0
desc="Verify basic rule is expired, with XML output"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired --output-as=xml"
test_assert_validate $CRM_EX_EXPIRED 0
desc="Verify basic rule worked in the past"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
test_assert $CRM_EX_OK 0
desc="Verify basic rule worked in the past (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Verify basic rule is not yet in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
test_assert $CRM_EX_NOT_YET_IN_EFFECT 0
desc="Verify basic rule is not yet in effect (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet --output-as=xml"
test_assert_validate $CRM_EX_NOT_YET_IN_EFFECT 0
desc="Verify date_spec rule with years has expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED 0
desc="Verify date_spec rule with years has expired (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml"
test_assert_validate $CRM_EX_EXPIRED 0
desc="Verify multiple rules at once"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED 0
desc="Verify multiple rules at once, with XML output"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml"
test_assert_validate $CRM_EX_EXPIRED 0
desc="Verify date_spec rule with years is in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
test_assert $CRM_EX_OK 0
desc="Verify date_spec rule with years is in effect (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Try to check a rule whose date_spec does not contain years="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule whose date_spec does not contain years= (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years --output-as=xml"
test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule whose date_spec contains years= and moon="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule whose date_spec contains years= and moon= (XML)"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon --output-as=xml"
test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule with no date_expression"
cmd="crm_rule -c -r cli-no-date_expression-rule"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0
desc="Try to check a rule with no date_expression (XML)"
cmd="crm_rule -c -r cli-no-date_expression-rule --output-as=xml"
test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0
unset CIB_shadow_dir
}
# Ensure all command output is in portable locale for comparison
export LC_ALL="C"
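# Verify cibadmin --show-access rendering of ACLs as color, namespace, and plain text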
test_access_render() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.access_render.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
# Create a test CIB that has ACL roles
cat < "$TMPXML"
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
unset CIB_user
# Run cibadmin --show-access on the test CIB with different users (tony here)
desc="An instance of ACLs render (into color)"
cmd="cibadmin --force --show-access=color -Q --user tony"
test_assert $CRM_EX_OK 0
desc="An instance of ACLs render (into namespacing)"
cmd="cibadmin --force --show-access=namespace -Q --user tony"
test_assert $CRM_EX_OK 0
desc="An instance of ACLs render (into text)"
cmd="cibadmin --force --show-access=text -Q --user tony"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPXML"
}
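# Verify crm_mon output when cluster nodes report mixed feature sets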
function test_feature_set() {
create_shadow_cib
# Import the initial test CIB with non-mixed versions
desc="Import the test CIB"
cmd="cibadmin --replace --xml-file $test_home/cli/crm_mon-feature_set.xml"
test_assert $CRM_EX_OK
desc="Complete text output, no mixed status"
cmd="crm_mon -1 --show-detail"
test_assert $CRM_EX_OK 0
desc="XML output, no mixed status"
cmd="crm_mon --output-as=xml"
test_assert $CRM_EX_OK 0
# Modify the CIB to fake that the cluster has mixed versions
desc="Fake inconsistent feature set"
cmd="crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot"
test_assert $CRM_EX_OK
desc="Complete text output, mixed status"
cmd="crm_mon -1 --show-detail"
test_assert $CRM_EX_OK 0
desc="XML output, mixed status"
cmd="crm_mon --output-as=xml"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
}
# Process command-line arguments
while [ $# -gt 0 ]; do
case "$1" in
-t)
tests="$2"
shift 2
;;
-V|--verbose)
verbose=1
shift
;;
-v|--valgrind)
export G_SLICE=always-malloc
VALGRIND_CMD="valgrind $VALGRIND_OPTS"
shift
;;
-s)
do_save=1
shift
;;
-p)
export PATH="$2:$PATH"
shift
;;
--help)
echo "$USAGE_TEXT"
exit $CRM_EX_OK
;;
*)
echo "error: unknown option $1"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
for t in $tests; do
case "$t" in
agents) ;;
dates) ;;
error_codes) ;;
tools) ;;
acls) ;;
validity) ;;
upgrade) ;;
rules) ;;
crm_mon) ;;
feature_set) ;;
*)
echo "error: unknown test $t"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
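# xmllint is optional; without it, XML command output is not validated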
XMLLINT_CMD=$(which xmllint 2>/dev/null)
if [ $? -ne 0 ]; then
XMLLINT_CMD=""
echo "xmllint is missing - install it to validate command output"
fi
# Check whether we're running from source directory
SRCDIR=$(dirname $test_home)
if [ -x "$SRCDIR/tools/crm_simulate" ]; then
export PATH="$SRCDIR/tools:$PATH"
echo "Using local binaries from: $SRCDIR/tools"
if [ -x "$SRCDIR/xml" ]; then
export PCMK_schema_directory="$SRCDIR/xml"
echo "Using local schemas from: $PCMK_schema_directory"
fi
else
export PCMK_schema_directory=@CRM_SCHEMA_DIRECTORY@
fi
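# Run each requested test, then normalize its output for comparison with the stored expected results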
for t in $tests; do
echo "Testing $t"
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
eval TMPFILE_$t="$TMPFILE"
test_$t > "$TMPFILE"
# last-rc-change= is always numeric in the CIB. However, for the crm_mon
# test we also need to compare against the XML output of the crm_mon
# program. There, these are shown as human readable strings (like the
# output of the `date` command).
sed -e 's/cib-last-written.*>/>/'\
-e 's/Last updated: .*/Last updated:/' \
-e 's/Last change: .*/Last change:/' \
-e 's/(version .*)/(version)/' \
-e 's/last_update time=\".*\"/last_update time=\"\"/' \
-e 's/last_change time=\".*\"/last_change time=\"\"/' \
-e 's/ api-version=\".*\" / api-version=\"X\" /' \
-e 's/ version="[^"]*" / version="" /' \
-e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \
-e 's/crm_feature_set="[^"]*" //'\
-e 's/validate-with="[^"]*" //'\
-e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
-e 's/.*\(crm_time_parse_duration\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(crm_time_parse_period\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(crm_time_parse_sec\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(log_xmllib_err\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(parse_date\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e "s/ last-rc-change=['\"][-+A-Za-z0-9: ]*['\"],\{0,1\}//" \
-e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
-e 's/^Entity: line [0-9][0-9]*: //'\
-e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
-e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
-e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
-e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
-e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
-e 's/Error performing operation: Device not configured/Error performing operation: No such device or address/' \
-e 's/\(Injecting attribute last-failure-ping#monitor_10000=\)[0-9]*/\1/' \
-e 's/^lt-//' \
-e 's/ocf::/ocf:/' \
-e 's/Masters:/Promoted:/' \
-e 's/Slaves:/Unpromoted:/' \
-e 's/Master/Promoted/' \
-e 's/Slave/Unpromoted/' \
-e 's/\x1b/\\x1b/' \
"$TMPFILE" > "${TMPFILE}.$$"
mv -- "${TMPFILE}.$$" "$TMPFILE"
if [ $do_save -eq 1 ]; then
cp "$TMPFILE" $test_home/cli/regression.$t.exp
fi
done
rm -rf "${shadow_dir}"
rm -f "${test_assert_outfile}"
rm -f "${test_assert_errfile}"
rm -f "${xmllint_errfile}"
failed=0
if [ $verbose -eq 1 ]; then
echo -e "\n\nResults"
fi
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
if [ $verbose -eq 1 ]; then
diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
else
diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
fi
if [ $? -ne 0 ]; then
failed=1
fi
done
echo -e "\n\nSummary"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
grep -e '^\* \(Passed\|Failed\)' "$TMPFILE"
done
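# Remove a test's output file if it matches the expected results; otherwise report its location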
function print_or_remove_file() {
eval TMPFILE="\$TMPFILE_$1"
if [[ ! $(diff -wq $test_home/cli/regression.$1.exp "$TMPFILE") ]]; then
rm -f "$TMPFILE"
else
echo " $TMPFILE"
fi
}
if [ $num_errors -ne 0 ] && [ $failed -ne 0 ]; then
echo "$num_errors tests failed; see output in:"
for t in $tests; do
print_or_remove_file "$t"
done
exit $CRM_EX_ERROR
elif [ $num_errors -ne 0 ]; then
echo "$num_errors tests failed"
for t in $tests; do
print_or_remove_file "$t"
done
exit $CRM_EX_ERROR
elif [ $failed -eq 1 ]; then
echo "$num_passed tests passed but output was unexpected; see output in:"
for t in $tests; do
print_or_remove_file "$t"
done
exit $CRM_EX_DIGEST
else
echo $num_passed tests passed
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
rm -f "$TMPFILE"
done
crm_shadow --force --delete $shadow >/dev/null 2>&1
exit $CRM_EX_OK
fi
diff --git a/cts/scheduler/summary/bug-cl-5247.summary b/cts/scheduler/summary/bug-cl-5247.summary
index 056e526490..b18bdd8b91 100644
--- a/cts/scheduler/summary/bug-cl-5247.summary
+++ b/cts/scheduler/summary/bug-cl-5247.summary
@@ -1,87 +1,87 @@
Using the original execution date of: 2015-08-12 02:53:40Z
Current cluster status:
* Node List:
* Online: [ bl460g8n3 bl460g8n4 ]
- * GuestOnline: [ pgsr01@bl460g8n3 ]
+ * GuestOnline: [ pgsr01 ]
* Full List of Resources:
* prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3
* prmDB2 (ocf:heartbeat:VirtualDomain): FAILED bl460g8n4
* Resource Group: grpStonith1:
* prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
* Resource Group: grpStonith2:
* prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
* Resource Group: master-group:
* vip-master (ocf:heartbeat:Dummy): FAILED pgsr02
* vip-rep (ocf:heartbeat:Dummy): FAILED pgsr02
* Clone Set: msPostgresql [pgsql] (promotable):
* Promoted: [ pgsr01 ]
* Stopped: [ bl460g8n3 bl460g8n4 ]
Transition Summary:
* Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean'
* Stop prmDB2 ( bl460g8n4 ) due to node availability
* Recover vip-master ( pgsr02 -> pgsr01 )
* Recover vip-rep ( pgsr02 -> pgsr01 )
* Stop pgsql:0 ( Promoted pgsr02 ) due to node availability
* Stop pgsr02 ( bl460g8n4 ) due to node availability
Executing Cluster Transition:
* Resource action: vip-master monitor on pgsr01
* Resource action: vip-rep monitor on pgsr01
* Pseudo action: msPostgresql_pre_notify_demote_0
* Resource action: pgsr01 monitor on bl460g8n4
* Resource action: pgsr02 stop on bl460g8n4
* Resource action: pgsr02 monitor on bl460g8n3
* Resource action: prmDB2 stop on bl460g8n4
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-pre_notify_demote_0
* Pseudo action: msPostgresql_demote_0
* Pseudo action: stonith-pgsr02-off on pgsr02
* Pseudo action: pgsql_post_notify_stop_0
* Pseudo action: pgsql_demote_0
* Pseudo action: msPostgresql_demoted_0
* Pseudo action: msPostgresql_post_notify_demoted_0
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-post_notify_demoted_0
* Pseudo action: msPostgresql_pre_notify_stop_0
* Pseudo action: master-group_stop_0
* Pseudo action: vip-rep_stop_0
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-pre_notify_stop_0
* Pseudo action: msPostgresql_stop_0
* Pseudo action: vip-master_stop_0
* Pseudo action: pgsql_stop_0
* Pseudo action: msPostgresql_stopped_0
* Pseudo action: master-group_stopped_0
* Pseudo action: master-group_start_0
* Resource action: vip-master start on pgsr01
* Resource action: vip-rep start on pgsr01
* Pseudo action: msPostgresql_post_notify_stopped_0
* Pseudo action: master-group_running_0
* Resource action: vip-master monitor=10000 on pgsr01
* Resource action: vip-rep monitor=10000 on pgsr01
* Resource action: pgsql notify on pgsr01
* Pseudo action: msPostgresql_confirmed-post_notify_stopped_0
* Pseudo action: pgsql_notified_0
* Resource action: pgsql monitor=9000 on pgsr01
Using the original execution date of: 2015-08-12 02:53:40Z
Revised Cluster Status:
* Node List:
* Online: [ bl460g8n3 bl460g8n4 ]
- * GuestOnline: [ pgsr01@bl460g8n3 ]
+ * GuestOnline: [ pgsr01 ]
* Full List of Resources:
* prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3
* prmDB2 (ocf:heartbeat:VirtualDomain): FAILED
* Resource Group: grpStonith1:
* prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
* Resource Group: grpStonith2:
* prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
* Resource Group: master-group:
* vip-master (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ]
* vip-rep (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ]
* Clone Set: msPostgresql [pgsql] (promotable):
* Promoted: [ pgsr01 ]
* Stopped: [ bl460g8n3 bl460g8n4 ]
diff --git a/cts/scheduler/summary/bug-rh-1097457.summary b/cts/scheduler/summary/bug-rh-1097457.summary
index 67252c9f8c..f68a509609 100644
--- a/cts/scheduler/summary/bug-rh-1097457.summary
+++ b/cts/scheduler/summary/bug-rh-1097457.summary
@@ -1,126 +1,126 @@
2 of 26 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ lama2 lama3 ]
- * GuestOnline: [ lamaVM1@lama2 lamaVM2@lama3 lamaVM3@lama3 ]
+ * GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* FSlun1 (ocf:heartbeat:Filesystem): Started lamaVM1
* FSlun2 (ocf:heartbeat:Filesystem): Started lamaVM1
* VM2 (ocf:heartbeat:VirtualDomain): FAILED lama3
* VM3 (ocf:heartbeat:VirtualDomain): Started lama3
* FSlun3 (ocf:heartbeat:Filesystem): FAILED lamaVM2
* FSlun4 (ocf:heartbeat:Filesystem): Started lamaVM3
* FAKE5-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE6-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE5 (ocf:heartbeat:Dummy): Started lamaVM3
* Resource Group: lamaVM1-G1:
* FAKE1 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE1-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G2:
* FAKE2 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE2-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G3:
* FAKE3 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE3-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM2-G4:
* FAKE4 (ocf:heartbeat:Dummy): Started lamaVM2
* FAKE4-IP (ocf:heartbeat:IPaddr2): Started lamaVM2
* Clone Set: FAKE6-clone [FAKE6]:
* Started: [ lamaVM1 lamaVM2 lamaVM3 ]
Transition Summary:
* Fence (reboot) lamaVM2 (resource: VM2) 'guest is unclean'
* Recover VM2 ( lama3 )
* Recover FSlun3 ( lamaVM2 -> lama2 )
* Restart FAKE4 ( lamaVM2 ) due to required VM2 start
* Restart FAKE4-IP ( lamaVM2 ) due to required VM2 start
* Restart FAKE6:2 ( lamaVM2 ) due to required VM2 start
* Restart lamaVM2 ( lama3 ) due to required VM2 start
Executing Cluster Transition:
* Resource action: FSlun1 monitor on lamaVM3
* Resource action: FSlun2 monitor on lamaVM3
* Resource action: FSlun3 monitor on lamaVM3
* Resource action: FSlun3 monitor on lamaVM1
* Resource action: FSlun4 monitor on lamaVM1
* Resource action: FAKE5-IP monitor on lamaVM3
* Resource action: FAKE5-IP monitor on lamaVM1
* Resource action: FAKE6-IP monitor on lamaVM3
* Resource action: FAKE6-IP monitor on lamaVM1
* Resource action: FAKE5 monitor on lamaVM1
* Resource action: FAKE1 monitor on lamaVM3
* Resource action: FAKE1-IP monitor on lamaVM3
* Resource action: FAKE2 monitor on lamaVM3
* Resource action: FAKE2-IP monitor on lamaVM3
* Resource action: FAKE3 monitor on lamaVM3
* Resource action: FAKE3-IP monitor on lamaVM3
* Resource action: FAKE4 monitor on lamaVM3
* Resource action: FAKE4 monitor on lamaVM1
* Resource action: FAKE4-IP monitor on lamaVM3
* Resource action: FAKE4-IP monitor on lamaVM1
* Resource action: lamaVM2 stop on lama3
* Resource action: VM2 stop on lama3
* Pseudo action: stonith-lamaVM2-reboot on lamaVM2
* Resource action: VM2 start on lama3
* Resource action: VM2 monitor=10000 on lama3
* Pseudo action: lamaVM2-G4_stop_0
* Pseudo action: FAKE4-IP_stop_0
* Pseudo action: FAKE6-clone_stop_0
* Resource action: lamaVM2 start on lama3
* Resource action: lamaVM2 monitor=30000 on lama3
* Resource action: FSlun3 monitor=10000 on lamaVM2
* Pseudo action: FAKE4_stop_0
* Pseudo action: FAKE6_stop_0
* Pseudo action: FAKE6-clone_stopped_0
* Pseudo action: FAKE6-clone_start_0
* Pseudo action: lamaVM2-G4_stopped_0
* Resource action: FAKE6 start on lamaVM2
* Resource action: FAKE6 monitor=30000 on lamaVM2
* Pseudo action: FAKE6-clone_running_0
* Pseudo action: FSlun3_stop_0
* Resource action: FSlun3 start on lama2
* Pseudo action: lamaVM2-G4_start_0
* Resource action: FAKE4 start on lamaVM2
* Resource action: FAKE4 monitor=30000 on lamaVM2
* Resource action: FAKE4-IP start on lamaVM2
* Resource action: FAKE4-IP monitor=30000 on lamaVM2
* Resource action: FSlun3 monitor=10000 on lama2
* Pseudo action: lamaVM2-G4_running_0
Revised Cluster Status:
* Node List:
* Online: [ lama2 lama3 ]
- * GuestOnline: [ lamaVM1@lama2 lamaVM2@lama3 lamaVM3@lama3 ]
+ * GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* FSlun1 (ocf:heartbeat:Filesystem): Started lamaVM1
* FSlun2 (ocf:heartbeat:Filesystem): Started lamaVM1
* VM2 (ocf:heartbeat:VirtualDomain): FAILED lama3
* VM3 (ocf:heartbeat:VirtualDomain): Started lama3
* FSlun3 (ocf:heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ]
* FSlun4 (ocf:heartbeat:Filesystem): Started lamaVM3
* FAKE5-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE6-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE5 (ocf:heartbeat:Dummy): Started lamaVM3
* Resource Group: lamaVM1-G1:
* FAKE1 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE1-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G2:
* FAKE2 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE2-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G3:
* FAKE3 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE3-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM2-G4:
* FAKE4 (ocf:heartbeat:Dummy): Started lamaVM2
* FAKE4-IP (ocf:heartbeat:IPaddr2): Started lamaVM2
* Clone Set: FAKE6-clone [FAKE6]:
* Started: [ lamaVM1 lamaVM2 lamaVM3 ]
diff --git a/cts/scheduler/summary/bundle-connection-with-container.summary b/cts/scheduler/summary/bundle-connection-with-container.summary
index f418b65b8a..62e0ec683c 100644
--- a/cts/scheduler/summary/bundle-connection-with-container.summary
+++ b/cts/scheduler/summary/bundle-connection-with-container.summary
@@ -1,63 +1,63 @@
Using the original execution date of: 2022-07-13 22:13:26Z
Current cluster status:
* Node List:
* Online: [ rhel8-1 rhel8-3 rhel8-4 rhel8-5 ]
* OFFLINE: [ rhel8-2 ]
* RemoteOnline: [ remote-rhel8-2 ]
- * GuestOnline: [ httpd-bundle-1@rhel8-3 httpd-bundle-2@rhel8-1 ]
+ * GuestOnline: [ httpd-bundle-1 httpd-bundle-2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel8-3
* FencingPass (stonith:fence_dummy): Started rhel8-4
* FencingFail (stonith:fence_dummy): Started rhel8-5
* remote-rhel8-2 (ocf:pacemaker:remote): Started rhel8-1
* remote-rsc (ocf:pacemaker:Dummy): Started remote-rhel8-2
* Container bundle set: httpd-bundle [localhost/pcmktest:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED rhel8-1
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel8-3
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Started remote-rhel8-2
Transition Summary:
* Fence (reboot) httpd-bundle-0 (resource: httpd-bundle-podman-0) 'guest is unclean'
* Recover httpd-bundle-podman-0 ( rhel8-1 )
* Recover httpd-bundle-0 ( rhel8-1 )
* Recover httpd:0 ( httpd-bundle-0 )
Executing Cluster Transition:
* Resource action: httpd-bundle-0 stop on rhel8-1
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: httpd-bundle-podman-0 stop on rhel8-1
* Pseudo action: stonith-httpd-bundle-0-reboot on httpd-bundle-0
* Pseudo action: httpd-bundle-clone_stop_0
* Resource action: httpd-bundle-podman-0 start on rhel8-1
* Resource action: httpd-bundle-podman-0 monitor=60000 on rhel8-1
* Resource action: httpd-bundle-0 start on rhel8-1
* Resource action: httpd-bundle-0 monitor=30000 on rhel8-1
* Pseudo action: httpd_stop_0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Pseudo action: httpd-bundle_stopped_0
* Resource action: httpd start on httpd-bundle-0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
* Resource action: httpd monitor=15000 on httpd-bundle-0
Using the original execution date of: 2022-07-13 22:13:26Z
Revised Cluster Status:
* Node List:
* Online: [ rhel8-1 rhel8-3 rhel8-4 rhel8-5 ]
* OFFLINE: [ rhel8-2 ]
* RemoteOnline: [ remote-rhel8-2 ]
- * GuestOnline: [ httpd-bundle-0@rhel8-1 httpd-bundle-1@rhel8-3 httpd-bundle-2@rhel8-1 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel8-3
* FencingPass (stonith:fence_dummy): Started rhel8-4
* FencingFail (stonith:fence_dummy): Started rhel8-5
* remote-rhel8-2 (ocf:pacemaker:remote): Started rhel8-1
* remote-rsc (ocf:pacemaker:Dummy): Started remote-rhel8-2
* Container bundle set: httpd-bundle [localhost/pcmktest:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-1
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel8-3
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Started remote-rhel8-2
diff --git a/cts/scheduler/summary/bundle-nested-colocation.summary b/cts/scheduler/summary/bundle-nested-colocation.summary
index 0db9d6f8b4..194909647d 100644
--- a/cts/scheduler/summary/bundle-nested-colocation.summary
+++ b/cts/scheduler/summary/bundle-nested-colocation.summary
@@ -1,106 +1,106 @@
Using the original execution date of: 2017-07-14 08:50:25Z
Current cluster status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ]
* RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ]
* Full List of Resources:
* overcloud-rabbit-0 (ocf:pacemaker:remote): Started overcloud-controller-0
* overcloud-rabbit-1 (ocf:pacemaker:remote): Started overcloud-controller-1
* overcloud-rabbit-2 (ocf:pacemaker:remote): Started overcloud-controller-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Stopped overcloud-rabbit-2
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-galera-0
* galera-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-galera-1
* galera-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-galera-2
Transition Summary:
* Restart rabbitmq-bundle-docker-0 ( overcloud-rabbit-0 ) due to resource definition change
* Start rabbitmq-bundle-0 ( overcloud-controller-0 )
* Start rabbitmq:0 ( rabbitmq-bundle-0 )
* Restart rabbitmq-bundle-docker-1 ( overcloud-rabbit-1 ) due to resource definition change
* Start rabbitmq-bundle-1 ( overcloud-controller-1 )
* Start rabbitmq:1 ( rabbitmq-bundle-1 )
* Restart rabbitmq-bundle-docker-2 ( overcloud-rabbit-2 ) due to resource definition change
* Start rabbitmq-bundle-2 ( overcloud-controller-2 )
* Start rabbitmq:2 ( rabbitmq-bundle-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Pseudo action: rabbitmq-bundle_stop_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Resource action: rabbitmq-bundle-docker-0 stop on overcloud-rabbit-0
* Resource action: rabbitmq-bundle-docker-0 start on overcloud-rabbit-0
* Resource action: rabbitmq-bundle-docker-0 monitor=60000 on overcloud-rabbit-0
* Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-2
* Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-1
* Resource action: rabbitmq-bundle-0 monitor on overcloud-galera-0
* Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-2
* Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-1
* Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-0
* Resource action: rabbitmq-bundle-docker-1 stop on overcloud-rabbit-1
* Resource action: rabbitmq-bundle-docker-1 start on overcloud-rabbit-1
* Resource action: rabbitmq-bundle-docker-1 monitor=60000 on overcloud-rabbit-1
* Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-2
* Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-1
* Resource action: rabbitmq-bundle-1 monitor on overcloud-galera-0
* Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-2
* Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-1
* Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-0
* Resource action: rabbitmq-bundle-docker-2 stop on overcloud-rabbit-2
* Resource action: rabbitmq-bundle-docker-2 start on overcloud-rabbit-2
* Resource action: rabbitmq-bundle-docker-2 monitor=60000 on overcloud-rabbit-2
* Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-2
* Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-1
* Resource action: rabbitmq-bundle-2 monitor on overcloud-galera-0
* Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-2
* Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-1
* Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-0
* Pseudo action: rabbitmq-bundle_stopped_0
* Resource action: rabbitmq-bundle-0 start on overcloud-controller-0
* Resource action: rabbitmq-bundle-1 start on overcloud-controller-1
* Resource action: rabbitmq-bundle-2 start on overcloud-controller-2
* Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0
* Resource action: rabbitmq:1 monitor on rabbitmq-bundle-1
* Resource action: rabbitmq:2 monitor on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: rabbitmq-bundle-0 monitor=30000 on overcloud-controller-0
* Resource action: rabbitmq-bundle-1 monitor=30000 on overcloud-controller-1
* Resource action: rabbitmq-bundle-2 monitor=30000 on overcloud-controller-2
* Resource action: rabbitmq:0 start on rabbitmq-bundle-0
* Resource action: rabbitmq:1 start on rabbitmq-bundle-1
* Resource action: rabbitmq:2 start on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_running_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
* Resource action: rabbitmq:1 notify on rabbitmq-bundle-1
* Resource action: rabbitmq:2 notify on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Pseudo action: rabbitmq-bundle_running_0
* Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
* Resource action: rabbitmq:1 monitor=10000 on rabbitmq-bundle-1
* Resource action: rabbitmq:2 monitor=10000 on rabbitmq-bundle-2
Using the original execution date of: 2017-07-14 08:50:25Z
Revised Cluster Status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 ]
* RemoteOnline: [ overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 ]
- * GuestOnline: [ rabbitmq-bundle-0@overcloud-controller-0 rabbitmq-bundle-1@overcloud-controller-1 rabbitmq-bundle-2@overcloud-controller-2 ]
+ * GuestOnline: [ rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ]
* Full List of Resources:
* overcloud-rabbit-0 (ocf:pacemaker:remote): Started overcloud-controller-0
* overcloud-rabbit-1 (ocf:pacemaker:remote): Started overcloud-controller-1
* overcloud-rabbit-2 (ocf:pacemaker:remote): Started overcloud-controller-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-rabbit-2
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-galera-0
* galera-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-galera-1
* galera-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-galera-2
diff --git a/cts/scheduler/summary/bundle-order-fencing.summary b/cts/scheduler/summary/bundle-order-fencing.summary
index ae0c42d2ef..c4487a7a3c 100644
--- a/cts/scheduler/summary/bundle-order-fencing.summary
+++ b/cts/scheduler/summary/bundle-order-fencing.summary
@@ -1,220 +1,220 @@
Using the original execution date of: 2017-09-12 10:51:59Z
Current cluster status:
* Node List:
* Node controller-0: UNCLEAN (offline)
* Online: [ controller-1 controller-2 ]
- * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED controller-0 (UNCLEAN)
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted controller-0 (UNCLEAN)
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted controller-0 (UNCLEAN)
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN)
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN)
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN)
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 (UNCLEAN)
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2
* stonith-fence_ipmilan-525400efba5c (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-0 (UNCLEAN)
* stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-0 (UNCLEAN)
Transition Summary:
* Fence (off) redis-bundle-0 (resource: redis-bundle-docker-0) 'guest is unclean'
* Fence (off) rabbitmq-bundle-0 (resource: rabbitmq-bundle-docker-0) 'guest is unclean'
* Fence (off) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
* Fence (reboot) controller-0 'peer is no longer part of the cluster'
* Stop rabbitmq-bundle-docker-0 ( controller-0 ) due to node availability
* Stop rabbitmq-bundle-0 ( controller-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
* Stop galera-bundle-docker-0 ( controller-0 ) due to node availability
* Stop galera-bundle-0 ( controller-0 ) due to unrunnable galera-bundle-docker-0 start
* Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start
* Stop redis-bundle-docker-0 ( controller-0 ) due to node availability
* Stop redis-bundle-0 ( controller-0 ) due to unrunnable redis-bundle-docker-0 start
* Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start
* Promote redis:1 ( Unpromoted -> Promoted redis-bundle-1 )
* Move ip-192.168.24.7 ( controller-0 -> controller-2 )
* Move ip-10.0.0.109 ( controller-0 -> controller-1 )
* Move ip-172.17.4.11 ( controller-0 -> controller-1 )
* Stop haproxy-bundle-docker-0 ( controller-0 ) due to node availability
* Move stonith-fence_ipmilan-5254003e8e97 ( controller-0 -> controller-1 )
* Move stonith-fence_ipmilan-5254000dcb3f ( controller-0 -> controller-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-0_stop_0
* Resource action: rabbitmq-bundle-0 monitor on controller-2
* Resource action: rabbitmq-bundle-0 monitor on controller-1
* Resource action: rabbitmq-bundle-1 monitor on controller-2
* Resource action: rabbitmq-bundle-2 monitor on controller-1
* Pseudo action: galera-bundle-0_stop_0
* Resource action: galera-bundle-0 monitor on controller-2
* Resource action: galera-bundle-0 monitor on controller-1
* Resource action: galera-bundle-1 monitor on controller-2
* Resource action: galera-bundle-2 monitor on controller-1
* Resource action: redis cancel=45000 on redis-bundle-1
* Resource action: redis cancel=60000 on redis-bundle-1
* Pseudo action: redis-bundle-master_pre_notify_demote_0
* Pseudo action: redis-bundle-0_stop_0
* Resource action: redis-bundle-0 monitor on controller-2
* Resource action: redis-bundle-0 monitor on controller-1
* Resource action: redis-bundle-1 monitor on controller-2
* Resource action: redis-bundle-2 monitor on controller-1
* Pseudo action: stonith-fence_ipmilan-5254003e8e97_stop_0
* Pseudo action: stonith-fence_ipmilan-5254000dcb3f_stop_0
* Pseudo action: haproxy-bundle_stop_0
* Pseudo action: redis-bundle_demote_0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: rabbitmq-bundle_stop_0
* Pseudo action: rabbitmq-bundle_start_0
* Fencing controller-0 (reboot)
* Resource action: rabbitmq notify on rabbitmq-bundle-1
* Resource action: rabbitmq notify on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-docker-0_stop_0
* Pseudo action: galera-bundle-master_demote_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0
* Pseudo action: redis-bundle-master_demote_0
* Pseudo action: haproxy-bundle-docker-0_stop_0
* Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1
* Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2
* Pseudo action: stonith-redis-bundle-0-off on redis-bundle-0
* Pseudo action: stonith-rabbitmq-bundle-0-off on rabbitmq-bundle-0
* Pseudo action: stonith-galera-bundle-0-off on galera-bundle-0
* Pseudo action: haproxy-bundle_stopped_0
* Pseudo action: rabbitmq_post_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Pseudo action: galera_demote_0
* Pseudo action: galera-bundle-master_demoted_0
* Pseudo action: redis_post_notify_stop_0
* Pseudo action: redis_demote_0
* Pseudo action: redis-bundle-master_demoted_0
* Pseudo action: ip-192.168.24.7_stop_0
* Pseudo action: ip-10.0.0.109_stop_0
* Pseudo action: ip-172.17.4.11_stop_0
* Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1
* Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq_stop_0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: galera-bundle-docker-0_stop_0
* Pseudo action: redis-bundle-master_post_notify_demoted_0
* Resource action: ip-192.168.24.7 start on controller-2
* Resource action: ip-10.0.0.109 start on controller-1
* Resource action: ip-172.17.4.11 start on controller-1
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Pseudo action: galera_stop_0
* Pseudo action: galera-bundle-master_stopped_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0
* Pseudo action: redis-bundle-master_pre_notify_stop_0
* Resource action: ip-192.168.24.7 monitor=10000 on controller-2
* Resource action: ip-10.0.0.109 monitor=10000 on controller-1
* Resource action: ip-172.17.4.11 monitor=10000 on controller-1
* Pseudo action: redis-bundle_demoted_0
* Pseudo action: redis-bundle_stop_0
* Pseudo action: galera-bundle_stopped_0
* Resource action: rabbitmq notify on rabbitmq-bundle-1
* Resource action: rabbitmq notify on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Pseudo action: galera-bundle-master_running_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: redis-bundle-master_stop_0
* Pseudo action: redis-bundle-docker-0_stop_0
* Pseudo action: galera-bundle_running_0
* Pseudo action: rabbitmq-bundle_stopped_0
* Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Pseudo action: redis_stop_0
* Pseudo action: redis-bundle-master_stopped_0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Pseudo action: redis-bundle-master_post_notify_stopped_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Pseudo action: redis-bundle_stopped_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Pseudo action: redis_notified_0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Pseudo action: rabbitmq-bundle_running_0
* Pseudo action: redis-bundle-master_running_0
* Pseudo action: redis-bundle-master_post_notify_running_0
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Pseudo action: redis-bundle_promote_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Resource action: redis promote on redis-bundle-1
* Pseudo action: redis-bundle-master_promoted_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Resource action: redis monitor=20000 on redis-bundle-1
Using the original execution date of: 2017-09-12 10:51:59Z
Revised Cluster Status:
* Node List:
* Online: [ controller-1 controller-2 ]
* OFFLINE: [ controller-0 ]
- * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted
* redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2
* stonith-fence_ipmilan-525400efba5c (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-2
diff --git a/cts/scheduler/summary/bundle-order-partial-start-2.summary b/cts/scheduler/summary/bundle-order-partial-start-2.summary
index 9ca81d6ebd..7f70a841fe 100644
--- a/cts/scheduler/summary/bundle-order-partial-start-2.summary
+++ b/cts/scheduler/summary/bundle-order-partial-start-2.summary
@@ -1,100 +1,100 @@
Current cluster status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
Transition Summary:
* Start rabbitmq:0 ( rabbitmq-bundle-0 )
* Restart galera-bundle-docker-0 ( undercloud ) due to required haproxy-bundle running
* Restart galera-bundle-0 ( undercloud ) due to required galera-bundle-docker-0 start
* Start galera:0 ( galera-bundle-0 )
* Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 )
* Start haproxy-bundle-docker-0 ( undercloud )
Executing Cluster Transition:
* Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: galera-bundle-0 stop on undercloud
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Resource action: haproxy-bundle-docker-0 monitor on undercloud
* Pseudo action: haproxy-bundle_start_0
* Pseudo action: redis-bundle_promote_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: galera-bundle-docker-0 stop on undercloud
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Resource action: haproxy-bundle-docker-0 start on undercloud
* Pseudo action: haproxy-bundle_running_0
* Pseudo action: galera-bundle_stopped_0
* Resource action: rabbitmq:0 start on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Resource action: redis promote on redis-bundle-0
* Pseudo action: redis-bundle-master_promoted_0
* Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Pseudo action: rabbitmq-bundle_running_0
* Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
* Resource action: redis monitor=20000 on redis-bundle-0
* Pseudo action: galera-bundle_start_0
* Resource action: galera-bundle-docker-0 start on undercloud
* Resource action: galera-bundle-docker-0 monitor=60000 on undercloud
* Resource action: galera-bundle-0 start on undercloud
* Resource action: galera-bundle-0 monitor=30000 on undercloud
* Resource action: galera:0 monitor on galera-bundle-0
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera:0 start on galera-bundle-0
* Pseudo action: galera-bundle-master_running_0
* Pseudo action: galera-bundle_running_0
* Resource action: galera:0 monitor=30000 on galera-bundle-0
* Resource action: galera:0 monitor=20000 on galera-bundle-0
Revised Cluster Status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
diff --git a/cts/scheduler/summary/bundle-order-partial-start.summary b/cts/scheduler/summary/bundle-order-partial-start.summary
index 7951a3fcf2..2ed8cd1dad 100644
--- a/cts/scheduler/summary/bundle-order-partial-start.summary
+++ b/cts/scheduler/summary/bundle-order-partial-start.summary
@@ -1,97 +1,97 @@
Current cluster status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
Transition Summary:
* Start rabbitmq:0 ( rabbitmq-bundle-0 )
* Start galera-bundle-docker-0 ( undercloud )
* Start galera-bundle-0 ( undercloud )
* Start galera:0 ( galera-bundle-0 )
* Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 )
* Start haproxy-bundle-docker-0 ( undercloud )
Executing Cluster Transition:
* Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: galera-bundle-docker-0 monitor on undercloud
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Resource action: haproxy-bundle-docker-0 monitor on undercloud
* Pseudo action: haproxy-bundle_start_0
* Pseudo action: redis-bundle_promote_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Resource action: haproxy-bundle-docker-0 start on undercloud
* Pseudo action: haproxy-bundle_running_0
* Resource action: rabbitmq:0 start on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Resource action: redis promote on redis-bundle-0
* Pseudo action: redis-bundle-master_promoted_0
* Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Pseudo action: rabbitmq-bundle_running_0
* Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
* Resource action: redis monitor=20000 on redis-bundle-0
* Pseudo action: galera-bundle_start_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera-bundle-docker-0 start on undercloud
* Resource action: galera-bundle-0 monitor on undercloud
* Resource action: galera-bundle-docker-0 monitor=60000 on undercloud
* Resource action: galera-bundle-0 start on undercloud
* Resource action: galera:0 start on galera-bundle-0
* Pseudo action: galera-bundle-master_running_0
* Resource action: galera-bundle-0 monitor=30000 on undercloud
* Pseudo action: galera-bundle_running_0
* Resource action: galera:0 monitor=30000 on galera-bundle-0
* Resource action: galera:0 monitor=20000 on galera-bundle-0
Revised Cluster Status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
diff --git a/cts/scheduler/summary/bundle-order-partial-stop.summary b/cts/scheduler/summary/bundle-order-partial-stop.summary
index 4313a6ce00..39eab8f93e 100644
--- a/cts/scheduler/summary/bundle-order-partial-stop.summary
+++ b/cts/scheduler/summary/bundle-order-partial-stop.summary
@@ -1,127 +1,127 @@
Current cluster status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
* Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start
* Stop galera-bundle-docker-0 ( undercloud ) due to node availability
* Stop galera-bundle-0 ( undercloud ) due to node availability
* Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
* Stop redis-bundle-docker-0 ( undercloud ) due to node availability
* Stop redis-bundle-0 ( undercloud ) due to node availability
* Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start
* Stop ip-192.168.122.254 ( undercloud ) due to node availability
* Stop ip-192.168.122.250 ( undercloud ) due to node availability
* Stop ip-192.168.122.249 ( undercloud ) due to node availability
* Stop ip-192.168.122.253 ( undercloud ) due to node availability
* Stop ip-192.168.122.247 ( undercloud ) due to node availability
* Stop ip-192.168.122.248 ( undercloud ) due to node availability
* Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability
* Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Resource action: galera cancel=10000 on galera-bundle-0
* Resource action: redis cancel=20000 on redis-bundle-0
* Pseudo action: redis-bundle-master_pre_notify_demote_0
* Pseudo action: openstack-cinder-volume_stop_0
* Pseudo action: redis-bundle_demote_0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: rabbitmq-bundle_stop_0
* Resource action: rabbitmq notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Pseudo action: galera-bundle-master_demote_0
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0
* Pseudo action: redis-bundle-master_demote_0
* Resource action: openstack-cinder-volume-docker-0 stop on undercloud
* Pseudo action: openstack-cinder-volume_stopped_0
* Resource action: rabbitmq stop on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Resource action: rabbitmq-bundle-0 stop on undercloud
* Resource action: galera demote on galera-bundle-0
* Pseudo action: galera-bundle-master_demoted_0
* Resource action: redis demote on redis-bundle-0
* Pseudo action: redis-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Resource action: rabbitmq-bundle-docker-0 stop on undercloud
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: redis-bundle-master_post_notify_demoted_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Resource action: galera stop on galera-bundle-0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: galera-bundle-0 stop on undercloud
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0
* Pseudo action: redis-bundle-master_pre_notify_stop_0
* Pseudo action: redis-bundle_demoted_0
* Pseudo action: rabbitmq-bundle_stopped_0
* Resource action: galera-bundle-docker-0 stop on undercloud
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: galera-bundle_stopped_0
* Pseudo action: redis-bundle_stop_0
* Pseudo action: redis-bundle-master_stop_0
* Resource action: redis stop on redis-bundle-0
* Pseudo action: redis-bundle-master_stopped_0
* Resource action: redis-bundle-0 stop on undercloud
* Pseudo action: redis-bundle-master_post_notify_stopped_0
* Resource action: redis-bundle-docker-0 stop on undercloud
* Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: redis-bundle_stopped_0
* Pseudo action: haproxy-bundle_stop_0
* Resource action: haproxy-bundle-docker-0 stop on undercloud
* Pseudo action: haproxy-bundle_stopped_0
* Resource action: ip-192.168.122.254 stop on undercloud
* Resource action: ip-192.168.122.250 stop on undercloud
* Resource action: ip-192.168.122.249 stop on undercloud
* Resource action: ip-192.168.122.253 stop on undercloud
* Resource action: ip-192.168.122.247 stop on undercloud
* Resource action: ip-192.168.122.248 stop on undercloud
* Cluster action: do_shutdown on undercloud
Revised Cluster Status:
* Node List:
* Online: [ undercloud ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
diff --git a/cts/scheduler/summary/bundle-order-startup-clone-2.summary b/cts/scheduler/summary/bundle-order-startup-clone-2.summary
index 8fc4cc1f88..585fe0bb03 100644
--- a/cts/scheduler/summary/bundle-order-startup-clone-2.summary
+++ b/cts/scheduler/summary/bundle-order-startup-clone-2.summary
@@ -1,213 +1,213 @@
Current cluster status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ]
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): Stopped
* galera-bundle-2 (ocf:heartbeat:galera): Stopped
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Stopped
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* redis-bundle-1 (ocf:heartbeat:redis): Stopped
* redis-bundle-2 (ocf:heartbeat:redis): Stopped
Transition Summary:
* Start storage:0 ( metal-1 )
* Start storage:1 ( metal-2 )
* Start storage:2 ( metal-3 )
* Start galera-bundle-docker-0 ( metal-1 )
* Start galera-bundle-0 ( metal-1 )
* Start galera:0 ( galera-bundle-0 )
* Start galera-bundle-docker-1 ( metal-2 )
* Start galera-bundle-1 ( metal-2 )
* Start galera:1 ( galera-bundle-1 )
* Start galera-bundle-docker-2 ( metal-3 )
* Start galera-bundle-2 ( metal-3 )
* Start galera:2 ( galera-bundle-2 )
* Start haproxy-bundle-docker-0 ( metal-1 )
* Start haproxy-bundle-docker-1 ( metal-2 )
* Start haproxy-bundle-docker-2 ( metal-3 )
* Start redis-bundle-docker-0 ( metal-1 )
* Start redis-bundle-0 ( metal-1 )
* Promote redis:0 ( Stopped -> Promoted redis-bundle-0 )
* Start redis-bundle-docker-1 ( metal-2 )
* Start redis-bundle-1 ( metal-2 )
* Promote redis:1 ( Stopped -> Promoted redis-bundle-1 )
* Start redis-bundle-docker-2 ( metal-3 )
* Start redis-bundle-2 ( metal-3 )
* Promote redis:2 ( Stopped -> Promoted redis-bundle-2 )
Executing Cluster Transition:
* Resource action: storage:0 monitor on metal-1
* Resource action: storage:1 monitor on metal-2
* Resource action: storage:2 monitor on metal-3
* Pseudo action: storage-clone_pre_notify_start_0
* Resource action: galera-bundle-docker-0 monitor on metal-3
* Resource action: galera-bundle-docker-0 monitor on metal-2
* Resource action: galera-bundle-docker-0 monitor on metal-1
* Resource action: galera-bundle-docker-1 monitor on metal-3
* Resource action: galera-bundle-docker-1 monitor on metal-2
* Resource action: galera-bundle-docker-1 monitor on metal-1
* Resource action: galera-bundle-docker-2 monitor on metal-3
* Resource action: galera-bundle-docker-2 monitor on metal-2
* Resource action: galera-bundle-docker-2 monitor on metal-1
* Resource action: haproxy-bundle-docker-0 monitor on metal-3
* Resource action: haproxy-bundle-docker-0 monitor on metal-2
* Resource action: haproxy-bundle-docker-0 monitor on metal-1
* Resource action: haproxy-bundle-docker-1 monitor on metal-3
* Resource action: haproxy-bundle-docker-1 monitor on metal-2
* Resource action: haproxy-bundle-docker-1 monitor on metal-1
* Resource action: haproxy-bundle-docker-2 monitor on metal-3
* Resource action: haproxy-bundle-docker-2 monitor on metal-2
* Resource action: haproxy-bundle-docker-2 monitor on metal-1
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Resource action: redis-bundle-docker-0 monitor on metal-3
* Resource action: redis-bundle-docker-0 monitor on metal-2
* Resource action: redis-bundle-docker-0 monitor on metal-1
* Resource action: redis-bundle-docker-1 monitor on metal-3
* Resource action: redis-bundle-docker-1 monitor on metal-2
* Resource action: redis-bundle-docker-1 monitor on metal-1
* Resource action: redis-bundle-docker-2 monitor on metal-3
* Resource action: redis-bundle-docker-2 monitor on metal-2
* Resource action: redis-bundle-docker-2 monitor on metal-1
* Pseudo action: redis-bundle_start_0
* Pseudo action: haproxy-bundle_start_0
* Pseudo action: storage-clone_confirmed-pre_notify_start_0
* Resource action: haproxy-bundle-docker-0 start on metal-1
* Resource action: haproxy-bundle-docker-1 start on metal-2
* Resource action: haproxy-bundle-docker-2 start on metal-3
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Resource action: redis-bundle-docker-0 start on metal-1
* Resource action: redis-bundle-0 monitor on metal-3
* Resource action: redis-bundle-0 monitor on metal-2
* Resource action: redis-bundle-0 monitor on metal-1
* Resource action: redis-bundle-docker-1 start on metal-2
* Resource action: redis-bundle-1 monitor on metal-3
* Resource action: redis-bundle-1 monitor on metal-2
* Resource action: redis-bundle-1 monitor on metal-1
* Resource action: redis-bundle-docker-2 start on metal-3
* Resource action: redis-bundle-2 monitor on metal-3
* Resource action: redis-bundle-2 monitor on metal-2
* Resource action: redis-bundle-2 monitor on metal-1
* Pseudo action: haproxy-bundle_running_0
* Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-1
* Resource action: haproxy-bundle-docker-1 monitor=60000 on metal-2
* Resource action: haproxy-bundle-docker-2 monitor=60000 on metal-3
* Resource action: redis-bundle-docker-0 monitor=60000 on metal-1
* Resource action: redis-bundle-0 start on metal-1
* Resource action: redis-bundle-docker-1 monitor=60000 on metal-2
* Resource action: redis-bundle-1 start on metal-2
* Resource action: redis-bundle-docker-2 monitor=60000 on metal-3
* Resource action: redis-bundle-2 start on metal-3
* Resource action: redis:0 start on redis-bundle-0
* Resource action: redis:1 start on redis-bundle-1
* Resource action: redis:2 start on redis-bundle-2
* Pseudo action: redis-bundle-master_running_0
* Resource action: redis-bundle-0 monitor=30000 on metal-1
* Resource action: redis-bundle-1 monitor=30000 on metal-2
* Resource action: redis-bundle-2 monitor=30000 on metal-3
* Pseudo action: redis-bundle-master_post_notify_running_0
* Resource action: redis:0 notify on redis-bundle-0
* Resource action: redis:1 notify on redis-bundle-1
* Resource action: redis:2 notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Pseudo action: redis-bundle_promote_0
* Resource action: redis:0 notify on redis-bundle-0
* Resource action: redis:1 notify on redis-bundle-1
* Resource action: redis:2 notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Resource action: redis:0 promote on redis-bundle-0
* Resource action: redis:1 promote on redis-bundle-1
* Resource action: redis:2 promote on redis-bundle-2
* Pseudo action: redis-bundle-master_promoted_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Resource action: redis:0 notify on redis-bundle-0
* Resource action: redis:1 notify on redis-bundle-1
* Resource action: redis:2 notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Pseudo action: storage-clone_start_0
* Resource action: redis:0 monitor=20000 on redis-bundle-0
* Resource action: redis:1 monitor=20000 on redis-bundle-1
* Resource action: redis:2 monitor=20000 on redis-bundle-2
* Resource action: storage:0 start on metal-1
* Resource action: storage:1 start on metal-2
* Resource action: storage:2 start on metal-3
* Pseudo action: storage-clone_running_0
* Pseudo action: storage-clone_post_notify_running_0
* Resource action: storage:0 notify on metal-1
* Resource action: storage:1 notify on metal-2
* Resource action: storage:2 notify on metal-3
* Pseudo action: storage-clone_confirmed-post_notify_running_0
* Pseudo action: galera-bundle_start_0
* Resource action: storage:0 monitor=30000 on metal-1
* Resource action: storage:1 monitor=30000 on metal-2
* Resource action: storage:2 monitor=30000 on metal-3
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera-bundle-docker-0 start on metal-1
* Resource action: galera-bundle-0 monitor on metal-3
* Resource action: galera-bundle-0 monitor on metal-2
* Resource action: galera-bundle-0 monitor on metal-1
* Resource action: galera-bundle-docker-1 start on metal-2
* Resource action: galera-bundle-1 monitor on metal-3
* Resource action: galera-bundle-1 monitor on metal-2
* Resource action: galera-bundle-1 monitor on metal-1
* Resource action: galera-bundle-docker-2 start on metal-3
* Resource action: galera-bundle-2 monitor on metal-3
* Resource action: galera-bundle-2 monitor on metal-2
* Resource action: galera-bundle-2 monitor on metal-1
* Resource action: galera-bundle-docker-0 monitor=60000 on metal-1
* Resource action: galera-bundle-0 start on metal-1
* Resource action: galera-bundle-docker-1 monitor=60000 on metal-2
* Resource action: galera-bundle-1 start on metal-2
* Resource action: galera-bundle-docker-2 monitor=60000 on metal-3
* Resource action: galera-bundle-2 start on metal-3
* Resource action: galera:0 start on galera-bundle-0
* Resource action: galera:1 start on galera-bundle-1
* Resource action: galera:2 start on galera-bundle-2
* Pseudo action: galera-bundle-master_running_0
* Resource action: galera-bundle-0 monitor=30000 on metal-1
* Resource action: galera-bundle-1 monitor=30000 on metal-2
* Resource action: galera-bundle-2 monitor=30000 on metal-3
* Pseudo action: galera-bundle_running_0
* Resource action: galera:0 monitor=30000 on galera-bundle-0
* Resource action: galera:0 monitor=20000 on galera-bundle-0
* Resource action: galera:1 monitor=30000 on galera-bundle-1
* Resource action: galera:1 monitor=20000 on galera-bundle-1
* Resource action: galera:2 monitor=30000 on galera-bundle-2
* Resource action: galera:2 monitor=20000 on galera-bundle-2
Revised Cluster Status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
- * GuestOnline: [ galera-bundle-0@metal-1 galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Started: [ metal-1 metal-2 metal-3 ]
* Stopped: [ rabbitmq-bundle-0 ]
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Unpromoted metal-1
* galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2
* galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1
* redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2
* redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3
diff --git a/cts/scheduler/summary/bundle-order-startup-clone.summary b/cts/scheduler/summary/bundle-order-startup-clone.summary
index 4f6d9165d6..fa4e719c97 100644
--- a/cts/scheduler/summary/bundle-order-startup-clone.summary
+++ b/cts/scheduler/summary/bundle-order-startup-clone.summary
@@ -1,79 +1,79 @@
Current cluster status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ]
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
Transition Summary:
* Start storage:0 ( metal-1 ) due to unrunnable redis-bundle promoted (blocked)
* Start storage:1 ( metal-2 ) due to unrunnable redis-bundle promoted (blocked)
* Start storage:2 ( metal-3 ) due to unrunnable redis-bundle promoted (blocked)
* Start galera-bundle-docker-0 ( metal-1 ) due to unrunnable storage-clone notified (blocked)
* Start galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start (blocked)
* Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked)
* Start haproxy-bundle-docker-0 ( metal-2 )
* Start redis-bundle-docker-0 ( metal-2 )
* Start redis-bundle-0 ( metal-2 )
* Start redis:0 ( redis-bundle-0 )
Executing Cluster Transition:
* Resource action: storage:0 monitor on metal-1
* Resource action: storage:1 monitor on metal-2
* Resource action: storage:2 monitor on metal-3
* Resource action: galera-bundle-docker-0 monitor on metal-3
* Resource action: galera-bundle-docker-0 monitor on metal-2
* Resource action: galera-bundle-docker-0 monitor on metal-1
* Resource action: haproxy-bundle-docker-0 monitor on metal-3
* Resource action: haproxy-bundle-docker-0 monitor on metal-2
* Resource action: haproxy-bundle-docker-0 monitor on metal-1
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Resource action: redis-bundle-docker-0 monitor on metal-3
* Resource action: redis-bundle-docker-0 monitor on metal-2
* Resource action: redis-bundle-docker-0 monitor on metal-1
* Pseudo action: redis-bundle_start_0
* Pseudo action: haproxy-bundle_start_0
* Resource action: haproxy-bundle-docker-0 start on metal-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Resource action: redis-bundle-docker-0 start on metal-2
* Resource action: redis-bundle-0 monitor on metal-3
* Resource action: redis-bundle-0 monitor on metal-2
* Resource action: redis-bundle-0 monitor on metal-1
* Pseudo action: haproxy-bundle_running_0
* Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-2
* Resource action: redis-bundle-docker-0 monitor=60000 on metal-2
* Resource action: redis-bundle-0 start on metal-2
* Resource action: redis:0 start on redis-bundle-0
* Pseudo action: redis-bundle-master_running_0
* Resource action: redis-bundle-0 monitor=30000 on metal-2
* Pseudo action: redis-bundle-master_post_notify_running_0
* Resource action: redis:0 notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Resource action: redis:0 monitor=60000 on redis-bundle-0
* Resource action: redis:0 monitor=45000 on redis-bundle-0
Revised Cluster Status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
- * GuestOnline: [ redis-bundle-0@metal-2 ]
+ * GuestOnline: [ redis-bundle-0 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ]
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-2
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted metal-2
diff --git a/cts/scheduler/summary/bundle-order-startup.summary b/cts/scheduler/summary/bundle-order-startup.summary
index 3144e83a7b..7347a4adbf 100644
--- a/cts/scheduler/summary/bundle-order-startup.summary
+++ b/cts/scheduler/summary/bundle-order-startup.summary
@@ -1,141 +1,141 @@
Current cluster status:
* Node List:
* Online: [ undercloud ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
Transition Summary:
* Start rabbitmq-bundle-docker-0 ( undercloud )
* Start rabbitmq-bundle-0 ( undercloud )
* Start rabbitmq:0 ( rabbitmq-bundle-0 )
* Start galera-bundle-docker-0 ( undercloud )
* Start galera-bundle-0 ( undercloud )
* Start galera:0 ( galera-bundle-0 )
* Start redis-bundle-docker-0 ( undercloud )
* Start redis-bundle-0 ( undercloud )
* Start redis:0 ( redis-bundle-0 )
* Start ip-192.168.122.254 ( undercloud )
* Start ip-192.168.122.250 ( undercloud )
* Start ip-192.168.122.249 ( undercloud )
* Start ip-192.168.122.253 ( undercloud )
* Start ip-192.168.122.247 ( undercloud )
* Start ip-192.168.122.248 ( undercloud )
* Start haproxy-bundle-docker-0 ( undercloud )
* Start openstack-cinder-volume-docker-0 ( undercloud )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: rabbitmq-bundle-docker-0 monitor on undercloud
* Resource action: galera-bundle-docker-0 monitor on undercloud
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Resource action: redis-bundle-docker-0 monitor on undercloud
* Resource action: ip-192.168.122.254 monitor on undercloud
* Resource action: ip-192.168.122.250 monitor on undercloud
* Resource action: ip-192.168.122.249 monitor on undercloud
* Resource action: ip-192.168.122.253 monitor on undercloud
* Resource action: ip-192.168.122.247 monitor on undercloud
* Resource action: ip-192.168.122.248 monitor on undercloud
* Resource action: haproxy-bundle-docker-0 monitor on undercloud
* Resource action: openstack-cinder-volume-docker-0 monitor on undercloud
* Pseudo action: openstack-cinder-volume_start_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: rabbitmq-bundle-docker-0 start on undercloud
* Resource action: rabbitmq-bundle-0 monitor on undercloud
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Resource action: ip-192.168.122.254 start on undercloud
* Resource action: ip-192.168.122.250 start on undercloud
* Resource action: ip-192.168.122.249 start on undercloud
* Resource action: ip-192.168.122.253 start on undercloud
* Resource action: ip-192.168.122.247 start on undercloud
* Resource action: ip-192.168.122.248 start on undercloud
* Resource action: openstack-cinder-volume-docker-0 start on undercloud
* Pseudo action: openstack-cinder-volume_running_0
* Pseudo action: haproxy-bundle_start_0
* Resource action: rabbitmq-bundle-docker-0 monitor=60000 on undercloud
* Resource action: rabbitmq-bundle-0 start on undercloud
* Resource action: ip-192.168.122.254 monitor=10000 on undercloud
* Resource action: ip-192.168.122.250 monitor=10000 on undercloud
* Resource action: ip-192.168.122.249 monitor=10000 on undercloud
* Resource action: ip-192.168.122.253 monitor=10000 on undercloud
* Resource action: ip-192.168.122.247 monitor=10000 on undercloud
* Resource action: ip-192.168.122.248 monitor=10000 on undercloud
* Resource action: haproxy-bundle-docker-0 start on undercloud
* Resource action: openstack-cinder-volume-docker-0 monitor=60000 on undercloud
* Pseudo action: haproxy-bundle_running_0
* Pseudo action: redis-bundle_start_0
* Resource action: rabbitmq:0 start on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Resource action: rabbitmq-bundle-0 monitor=30000 on undercloud
* Pseudo action: redis-bundle-master_start_0
* Resource action: redis-bundle-docker-0 start on undercloud
* Resource action: redis-bundle-0 monitor on undercloud
* Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Resource action: redis-bundle-docker-0 monitor=60000 on undercloud
* Resource action: redis-bundle-0 start on undercloud
* Resource action: rabbitmq:0 notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: redis:0 start on redis-bundle-0
* Pseudo action: redis-bundle-master_running_0
* Resource action: redis-bundle-0 monitor=30000 on undercloud
* Pseudo action: rabbitmq-bundle_running_0
* Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0
* Pseudo action: redis-bundle-master_post_notify_running_0
* Resource action: redis:0 notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Pseudo action: galera-bundle_start_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera-bundle-docker-0 start on undercloud
* Resource action: galera-bundle-0 monitor on undercloud
* Resource action: redis:0 monitor=60000 on redis-bundle-0
* Resource action: redis:0 monitor=45000 on redis-bundle-0
* Resource action: galera-bundle-docker-0 monitor=60000 on undercloud
* Resource action: galera-bundle-0 start on undercloud
* Resource action: galera:0 start on galera-bundle-0
* Pseudo action: galera-bundle-master_running_0
* Resource action: galera-bundle-0 monitor=30000 on undercloud
* Pseudo action: galera-bundle_running_0
* Resource action: galera:0 monitor=30000 on galera-bundle-0
* Resource action: galera:0 monitor=20000 on galera-bundle-0
Revised Cluster Status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
diff --git a/cts/scheduler/summary/bundle-order-stop-clone.summary b/cts/scheduler/summary/bundle-order-stop-clone.summary
index b278a00d52..46708d06e9 100644
--- a/cts/scheduler/summary/bundle-order-stop-clone.summary
+++ b/cts/scheduler/summary/bundle-order-stop-clone.summary
@@ -1,88 +1,88 @@
Current cluster status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
- * GuestOnline: [ galera-bundle-0@metal-1 galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Started: [ metal-1 metal-2 metal-3 ]
* Stopped: [ rabbitmq-bundle-0 ]
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Unpromoted metal-1
* galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2
* galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1
* redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2
* redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3
Transition Summary:
* Stop storage:0 ( metal-1 ) due to node availability
* Stop galera-bundle-docker-0 ( metal-1 ) due to node availability
* Stop galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start
* Stop galera:0 ( Unpromoted galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start
Executing Cluster Transition:
* Pseudo action: storage-clone_pre_notify_stop_0
* Resource action: galera-bundle-0 monitor on metal-3
* Resource action: galera-bundle-0 monitor on metal-2
* Resource action: galera-bundle-1 monitor on metal-3
* Resource action: galera-bundle-1 monitor on metal-1
* Resource action: galera-bundle-2 monitor on metal-2
* Resource action: galera-bundle-2 monitor on metal-1
* Resource action: redis-bundle-0 monitor on metal-3
* Resource action: redis-bundle-0 monitor on metal-2
* Resource action: redis-bundle-1 monitor on metal-3
* Resource action: redis-bundle-1 monitor on metal-1
* Resource action: redis-bundle-2 monitor on metal-2
* Resource action: redis-bundle-2 monitor on metal-1
* Pseudo action: galera-bundle_stop_0
* Resource action: storage:0 notify on metal-1
* Resource action: storage:1 notify on metal-2
* Resource action: storage:2 notify on metal-3
* Pseudo action: storage-clone_confirmed-pre_notify_stop_0
* Pseudo action: galera-bundle-master_stop_0
* Resource action: galera:0 stop on galera-bundle-0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: galera-bundle-0 stop on metal-1
* Resource action: galera-bundle-docker-0 stop on metal-1
* Pseudo action: galera-bundle_stopped_0
* Pseudo action: galera-bundle_start_0
* Pseudo action: storage-clone_stop_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: storage:0 stop on metal-1
* Pseudo action: storage-clone_stopped_0
* Pseudo action: galera-bundle-master_running_0
* Pseudo action: galera-bundle_running_0
* Pseudo action: storage-clone_post_notify_stopped_0
* Resource action: storage:1 notify on metal-2
* Resource action: storage:2 notify on metal-3
* Pseudo action: storage-clone_confirmed-post_notify_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ metal-1 metal-2 metal-3 ]
* RemoteOFFLINE: [ rabbitmq-bundle-0 ]
- * GuestOnline: [ galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Clone Set: storage-clone [storage]:
* Started: [ metal-2 metal-3 ]
* Stopped: [ metal-1 rabbitmq-bundle-0 ]
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2
* galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1
* redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2
* redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3
diff --git a/cts/scheduler/summary/bundle-order-stop-on-remote.summary b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
index fa4ef5798a..5e2e367180 100644
--- a/cts/scheduler/summary/bundle-order-stop-on-remote.summary
+++ b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
@@ -1,224 +1,224 @@
Current cluster status:
* Node List:
* RemoteNode database-0: UNCLEAN (offline)
* RemoteNode database-2: UNCLEAN (offline)
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ database-1 messaging-0 messaging-1 messaging-2 ]
- * GuestOnline: [ galera-bundle-1@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-2 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-2 ]
* Full List of Resources:
* database-0 (ocf:pacemaker:remote): Stopped
* database-1 (ocf:pacemaker:remote): Started controller-2
* database-2 (ocf:pacemaker:remote): Stopped
* messaging-0 (ocf:pacemaker:remote): Started controller-2
* messaging-1 (ocf:pacemaker:remote): Started controller-2
* messaging-2 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0 (UNCLEAN)
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted database-2 (UNCLEAN)
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Stopped
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Stopped
* ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Stopped
* ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Stopped
* ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Stopped
* ip-172.17.4.19 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
* stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Stopped
* stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Stopped
* stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Stopped
* stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Stopped
Transition Summary:
* Fence (reboot) galera-bundle-2 (resource: galera-bundle-docker-2) 'guest is unclean'
* Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
* Start database-0 ( controller-0 )
* Start database-2 ( controller-1 )
* Recover galera-bundle-docker-0 ( database-0 )
* Start galera-bundle-0 ( controller-0 )
* Recover galera:0 ( Promoted galera-bundle-0 )
* Recover galera-bundle-docker-2 ( database-2 )
* Start galera-bundle-2 ( controller-1 )
* Recover galera:2 ( Promoted galera-bundle-2 )
* Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 )
* Start redis-bundle-docker-1 ( controller-1 )
* Start redis-bundle-1 ( controller-1 )
* Start redis:1 ( redis-bundle-1 )
* Start ip-192.168.24.11 ( controller-0 )
* Start ip-10.0.0.104 ( controller-1 )
* Start ip-172.17.1.11 ( controller-0 )
* Start ip-172.17.3.13 ( controller-1 )
* Start haproxy-bundle-docker-1 ( controller-1 )
* Start openstack-cinder-volume ( controller-0 )
* Start stonith-fence_ipmilan-525400c709f7 ( controller-1 )
* Start stonith-fence_ipmilan-5254005ea387 ( controller-1 )
* Start stonith-fence_ipmilan-525400542c06 ( controller-0 )
* Start stonith-fence_ipmilan-525400498d34 ( controller-1 )
Executing Cluster Transition:
* Resource action: database-0 start on controller-0
* Resource action: database-2 start on controller-1
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Resource action: stonith-fence_ipmilan-525400c709f7 start on controller-1
* Resource action: stonith-fence_ipmilan-5254005ea387 start on controller-1
* Resource action: stonith-fence_ipmilan-525400542c06 start on controller-0
* Resource action: stonith-fence_ipmilan-525400498d34 start on controller-1
* Pseudo action: redis-bundle_start_0
* Pseudo action: galera-bundle_demote_0
* Resource action: database-0 monitor=20000 on controller-0
* Resource action: database-2 monitor=20000 on controller-1
* Pseudo action: galera-bundle-master_demote_0
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Resource action: stonith-fence_ipmilan-525400c709f7 monitor=60000 on controller-1
* Resource action: stonith-fence_ipmilan-5254005ea387 monitor=60000 on controller-1
* Resource action: stonith-fence_ipmilan-525400542c06 monitor=60000 on controller-0
* Resource action: stonith-fence_ipmilan-525400498d34 monitor=60000 on controller-1
* Pseudo action: galera_demote_0
* Pseudo action: galera_demote_0
* Pseudo action: galera-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Resource action: galera-bundle-docker-0 stop on database-0
* Resource action: galera-bundle-docker-2 stop on database-2
* Pseudo action: stonith-galera-bundle-2-reboot on galera-bundle-2
* Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0
* Pseudo action: galera-bundle-master_stop_0
* Resource action: redis-bundle-docker-1 start on controller-1
* Resource action: redis-bundle-1 monitor on controller-1
* Resource action: ip-192.168.24.11 start on controller-0
* Resource action: ip-10.0.0.104 start on controller-1
* Resource action: ip-172.17.1.11 start on controller-0
* Resource action: ip-172.17.3.13 start on controller-1
* Resource action: openstack-cinder-volume start on controller-0
* Pseudo action: haproxy-bundle_start_0
* Pseudo action: galera_stop_0
* Resource action: redis-bundle-docker-1 monitor=60000 on controller-1
* Resource action: redis-bundle-1 start on controller-1
* Resource action: ip-192.168.24.11 monitor=10000 on controller-0
* Resource action: ip-10.0.0.104 monitor=10000 on controller-1
* Resource action: ip-172.17.1.11 monitor=10000 on controller-0
* Resource action: ip-172.17.3.13 monitor=10000 on controller-1
* Resource action: haproxy-bundle-docker-1 start on controller-1
* Resource action: openstack-cinder-volume monitor=60000 on controller-0
* Pseudo action: haproxy-bundle_running_0
* Pseudo action: galera_stop_0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: redis start on redis-bundle-1
* Pseudo action: redis-bundle-master_running_0
* Resource action: redis-bundle-1 monitor=30000 on controller-1
* Resource action: haproxy-bundle-docker-1 monitor=60000 on controller-1
* Pseudo action: galera-bundle_stopped_0
* Pseudo action: galera-bundle_start_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera-bundle-docker-0 start on database-0
* Resource action: galera-bundle-0 monitor on controller-1
* Resource action: galera-bundle-docker-2 start on database-2
* Resource action: galera-bundle-2 monitor on controller-1
* Pseudo action: redis-bundle-master_post_notify_running_0
* Resource action: galera-bundle-docker-0 monitor=60000 on database-0
* Resource action: galera-bundle-0 start on controller-0
* Resource action: galera-bundle-docker-2 monitor=60000 on database-2
* Resource action: galera-bundle-2 start on controller-1
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Resource action: galera start on galera-bundle-0
* Resource action: galera start on galera-bundle-2
* Pseudo action: galera-bundle-master_running_0
* Resource action: galera-bundle-0 monitor=30000 on controller-0
* Resource action: galera-bundle-2 monitor=30000 on controller-1
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Pseudo action: redis-bundle_promote_0
* Pseudo action: galera-bundle_running_0
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Pseudo action: galera-bundle_promote_0
* Pseudo action: galera-bundle-master_promote_0
* Resource action: redis promote on redis-bundle-0
* Pseudo action: redis-bundle-master_promoted_0
* Resource action: galera promote on galera-bundle-0
* Resource action: galera promote on galera-bundle-2
* Pseudo action: galera-bundle-master_promoted_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Pseudo action: galera-bundle_promoted_0
* Resource action: galera monitor=10000 on galera-bundle-0
* Resource action: galera monitor=10000 on galera-bundle-2
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Resource action: redis monitor=20000 on redis-bundle-0
* Resource action: redis monitor=60000 on redis-bundle-1
* Resource action: redis monitor=45000 on redis-bundle-1
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-2 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-2 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* database-0 (ocf:pacemaker:remote): Started controller-0
* database-1 (ocf:pacemaker:remote): Started controller-2
* database-2 (ocf:pacemaker:remote): Started controller-1
* messaging-0 (ocf:pacemaker:remote): Started controller-2
* messaging-1 (ocf:pacemaker:remote): Started controller-2
* messaging-2 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.19 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0
* stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Started controller-1
diff --git a/cts/scheduler/summary/bundle-order-stop.summary b/cts/scheduler/summary/bundle-order-stop.summary
index 4313a6ce00..39eab8f93e 100644
--- a/cts/scheduler/summary/bundle-order-stop.summary
+++ b/cts/scheduler/summary/bundle-order-stop.summary
@@ -1,127 +1,127 @@
Current cluster status:
* Node List:
* Online: [ undercloud ]
- * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 redis-bundle-0 ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted undercloud
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability
* Stop rabbitmq-bundle-0 ( undercloud ) due to node availability
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start
* Stop galera-bundle-docker-0 ( undercloud ) due to node availability
* Stop galera-bundle-0 ( undercloud ) due to node availability
* Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start
* Stop redis-bundle-docker-0 ( undercloud ) due to node availability
* Stop redis-bundle-0 ( undercloud ) due to node availability
* Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start
* Stop ip-192.168.122.254 ( undercloud ) due to node availability
* Stop ip-192.168.122.250 ( undercloud ) due to node availability
* Stop ip-192.168.122.249 ( undercloud ) due to node availability
* Stop ip-192.168.122.253 ( undercloud ) due to node availability
* Stop ip-192.168.122.247 ( undercloud ) due to node availability
* Stop ip-192.168.122.248 ( undercloud ) due to node availability
* Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability
* Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Resource action: galera cancel=10000 on galera-bundle-0
* Resource action: redis cancel=20000 on redis-bundle-0
* Pseudo action: redis-bundle-master_pre_notify_demote_0
* Pseudo action: openstack-cinder-volume_stop_0
* Pseudo action: redis-bundle_demote_0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: rabbitmq-bundle_stop_0
* Resource action: rabbitmq notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Pseudo action: galera-bundle-master_demote_0
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0
* Pseudo action: redis-bundle-master_demote_0
* Resource action: openstack-cinder-volume-docker-0 stop on undercloud
* Pseudo action: openstack-cinder-volume_stopped_0
* Resource action: rabbitmq stop on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Resource action: rabbitmq-bundle-0 stop on undercloud
* Resource action: galera demote on galera-bundle-0
* Pseudo action: galera-bundle-master_demoted_0
* Resource action: redis demote on redis-bundle-0
* Pseudo action: redis-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Resource action: rabbitmq-bundle-docker-0 stop on undercloud
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: redis-bundle-master_post_notify_demoted_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Resource action: galera stop on galera-bundle-0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: galera-bundle-0 stop on undercloud
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0
* Pseudo action: redis-bundle-master_pre_notify_stop_0
* Pseudo action: redis-bundle_demoted_0
* Pseudo action: rabbitmq-bundle_stopped_0
* Resource action: galera-bundle-docker-0 stop on undercloud
* Resource action: redis notify on redis-bundle-0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: galera-bundle_stopped_0
* Pseudo action: redis-bundle_stop_0
* Pseudo action: redis-bundle-master_stop_0
* Resource action: redis stop on redis-bundle-0
* Pseudo action: redis-bundle-master_stopped_0
* Resource action: redis-bundle-0 stop on undercloud
* Pseudo action: redis-bundle-master_post_notify_stopped_0
* Resource action: redis-bundle-docker-0 stop on undercloud
* Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: redis-bundle_stopped_0
* Pseudo action: haproxy-bundle_stop_0
* Resource action: haproxy-bundle-docker-0 stop on undercloud
* Pseudo action: haproxy-bundle_stopped_0
* Resource action: ip-192.168.122.254 stop on undercloud
* Resource action: ip-192.168.122.250 stop on undercloud
* Resource action: ip-192.168.122.249 stop on undercloud
* Resource action: ip-192.168.122.253 stop on undercloud
* Resource action: ip-192.168.122.247 stop on undercloud
* Resource action: ip-192.168.122.248 stop on undercloud
* Cluster action: do_shutdown on undercloud
Revised Cluster Status:
* Node List:
* Online: [ undercloud ]
* Full List of Resources:
* Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped
* ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped
* Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped
diff --git a/cts/scheduler/summary/bundle-probe-order-2.summary b/cts/scheduler/summary/bundle-probe-order-2.summary
index 024c472ff9..aecc2a498b 100644
--- a/cts/scheduler/summary/bundle-probe-order-2.summary
+++ b/cts/scheduler/summary/bundle-probe-order-2.summary
@@ -1,34 +1,34 @@
Using the original execution date of: 2017-10-12 07:31:57Z
Current cluster status:
* Node List:
- * GuestNode galera-bundle-0@centos2: maintenance
+ * GuestNode galera-bundle-0: maintenance
* Online: [ centos1 centos2 centos3 ]
* Full List of Resources:
* Container bundle set: galera-bundle [docker.io/tripleoupstream/centos-binary-mariadb:latest] (unmanaged):
* galera-bundle-0 (ocf:heartbeat:galera): Stopped centos2 (unmanaged)
* galera-bundle-1 (ocf:heartbeat:galera): Stopped (unmanaged)
* galera-bundle-2 (ocf:heartbeat:galera): Stopped (unmanaged)
Transition Summary:
Executing Cluster Transition:
* Resource action: galera:0 monitor on galera-bundle-0
* Resource action: galera-bundle-docker-0 monitor=60000 on centos2
* Resource action: galera-bundle-0 monitor=30000 on centos2
* Resource action: galera-bundle-docker-1 monitor on centos2
* Resource action: galera-bundle-docker-2 monitor on centos3
* Resource action: galera-bundle-docker-2 monitor on centos2
* Resource action: galera-bundle-docker-2 monitor on centos1
Using the original execution date of: 2017-10-12 07:31:57Z
Revised Cluster Status:
* Node List:
- * GuestNode galera-bundle-0@centos2: maintenance
+ * GuestNode galera-bundle-0: maintenance
* Online: [ centos1 centos2 centos3 ]
* Full List of Resources:
* Container bundle set: galera-bundle [docker.io/tripleoupstream/centos-binary-mariadb:latest] (unmanaged):
* galera-bundle-0 (ocf:heartbeat:galera): Stopped centos2 (unmanaged)
* galera-bundle-1 (ocf:heartbeat:galera): Stopped (unmanaged)
* galera-bundle-2 (ocf:heartbeat:galera): Stopped (unmanaged)
diff --git a/cts/scheduler/summary/bundle-probe-remotes.summary b/cts/scheduler/summary/bundle-probe-remotes.summary
index 895510d7c6..1dd8523148 100644
--- a/cts/scheduler/summary/bundle-probe-remotes.summary
+++ b/cts/scheduler/summary/bundle-probe-remotes.summary
@@ -1,168 +1,168 @@
Current cluster status:
* Node List:
* Online: [ c09-h05-r630 c09-h06-r630 c09-h07-r630 ]
* RemoteOFFLINE: [ c09-h08-r630 c09-h09-r630 c09-h10-r630 ]
* Full List of Resources:
* c09-h08-r630 (ocf:pacemaker:remote): Stopped
* c09-h09-r630 (ocf:pacemaker:remote): Stopped
* c09-h10-r630 (ocf:pacemaker:remote): Stopped
* Container bundle set: scale1-bundle [beekhof:remote]:
* scale1-bundle-0 (ocf:pacemaker:Dummy): Stopped
* scale1-bundle-1 (ocf:pacemaker:Dummy): Stopped
* scale1-bundle-2 (ocf:pacemaker:Dummy): Stopped
* scale1-bundle-3 (ocf:pacemaker:Dummy): Stopped
* scale1-bundle-4 (ocf:pacemaker:Dummy): Stopped
* scale1-bundle-5 (ocf:pacemaker:Dummy): Stopped
Transition Summary:
* Start c09-h08-r630 ( c09-h05-r630 )
* Start c09-h09-r630 ( c09-h06-r630 )
* Start c09-h10-r630 ( c09-h07-r630 )
* Start scale1-bundle-docker-0 ( c09-h05-r630 )
* Start scale1-bundle-0 ( c09-h05-r630 )
* Start dummy1:0 ( scale1-bundle-0 )
* Start scale1-bundle-docker-1 ( c09-h06-r630 )
* Start scale1-bundle-1 ( c09-h06-r630 )
* Start dummy1:1 ( scale1-bundle-1 )
* Start scale1-bundle-docker-2 ( c09-h07-r630 )
* Start scale1-bundle-2 ( c09-h07-r630 )
* Start dummy1:2 ( scale1-bundle-2 )
* Start scale1-bundle-docker-3 ( c09-h08-r630 )
* Start scale1-bundle-3 ( c09-h05-r630 )
* Start dummy1:3 ( scale1-bundle-3 )
* Start scale1-bundle-docker-4 ( c09-h09-r630 )
* Start scale1-bundle-4 ( c09-h06-r630 )
* Start dummy1:4 ( scale1-bundle-4 )
* Start scale1-bundle-docker-5 ( c09-h10-r630 )
* Start scale1-bundle-5 ( c09-h07-r630 )
* Start dummy1:5 ( scale1-bundle-5 )
Executing Cluster Transition:
* Resource action: c09-h08-r630 monitor on c09-h07-r630
* Resource action: c09-h08-r630 monitor on c09-h06-r630
* Resource action: c09-h08-r630 monitor on c09-h05-r630
* Resource action: c09-h09-r630 monitor on c09-h07-r630
* Resource action: c09-h09-r630 monitor on c09-h06-r630
* Resource action: c09-h09-r630 monitor on c09-h05-r630
* Resource action: c09-h10-r630 monitor on c09-h07-r630
* Resource action: c09-h10-r630 monitor on c09-h06-r630
* Resource action: c09-h10-r630 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h07-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h06-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h05-r630
* Pseudo action: scale1-bundle_start_0
* Resource action: c09-h08-r630 start on c09-h05-r630
* Resource action: c09-h09-r630 start on c09-h06-r630
* Resource action: c09-h10-r630 start on c09-h07-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-0 monitor on c09-h08-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-1 monitor on c09-h08-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-2 monitor on c09-h08-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-3 monitor on c09-h08-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-4 monitor on c09-h08-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h10-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h09-r630
* Resource action: scale1-bundle-docker-5 monitor on c09-h08-r630
* Resource action: c09-h08-r630 monitor=60000 on c09-h05-r630
* Resource action: c09-h09-r630 monitor=60000 on c09-h06-r630
* Resource action: c09-h10-r630 monitor=60000 on c09-h07-r630
* Pseudo action: scale1-bundle-clone_start_0
* Resource action: scale1-bundle-docker-0 start on c09-h05-r630
* Resource action: scale1-bundle-0 monitor on c09-h07-r630
* Resource action: scale1-bundle-0 monitor on c09-h06-r630
* Resource action: scale1-bundle-0 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-1 start on c09-h06-r630
* Resource action: scale1-bundle-1 monitor on c09-h07-r630
* Resource action: scale1-bundle-1 monitor on c09-h06-r630
* Resource action: scale1-bundle-1 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-2 start on c09-h07-r630
* Resource action: scale1-bundle-2 monitor on c09-h07-r630
* Resource action: scale1-bundle-2 monitor on c09-h06-r630
* Resource action: scale1-bundle-2 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-3 start on c09-h08-r630
* Resource action: scale1-bundle-3 monitor on c09-h07-r630
* Resource action: scale1-bundle-3 monitor on c09-h06-r630
* Resource action: scale1-bundle-3 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-4 start on c09-h09-r630
* Resource action: scale1-bundle-4 monitor on c09-h07-r630
* Resource action: scale1-bundle-4 monitor on c09-h06-r630
* Resource action: scale1-bundle-4 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-5 start on c09-h10-r630
* Resource action: scale1-bundle-5 monitor on c09-h07-r630
* Resource action: scale1-bundle-5 monitor on c09-h06-r630
* Resource action: scale1-bundle-5 monitor on c09-h05-r630
* Resource action: scale1-bundle-docker-0 monitor=60000 on c09-h05-r630
* Resource action: scale1-bundle-0 start on c09-h05-r630
* Resource action: scale1-bundle-docker-1 monitor=60000 on c09-h06-r630
* Resource action: scale1-bundle-1 start on c09-h06-r630
* Resource action: scale1-bundle-docker-2 monitor=60000 on c09-h07-r630
* Resource action: scale1-bundle-2 start on c09-h07-r630
* Resource action: scale1-bundle-docker-3 monitor=60000 on c09-h08-r630
* Resource action: scale1-bundle-3 start on c09-h05-r630
* Resource action: scale1-bundle-docker-4 monitor=60000 on c09-h09-r630
* Resource action: scale1-bundle-4 start on c09-h06-r630
* Resource action: scale1-bundle-docker-5 monitor=60000 on c09-h10-r630
* Resource action: scale1-bundle-5 start on c09-h07-r630
* Resource action: dummy1:0 start on scale1-bundle-0
* Resource action: dummy1:1 start on scale1-bundle-1
* Resource action: dummy1:2 start on scale1-bundle-2
* Resource action: dummy1:3 start on scale1-bundle-3
* Resource action: dummy1:4 start on scale1-bundle-4
* Resource action: dummy1:5 start on scale1-bundle-5
* Pseudo action: scale1-bundle-clone_running_0
* Resource action: scale1-bundle-0 monitor=30000 on c09-h05-r630
* Resource action: scale1-bundle-1 monitor=30000 on c09-h06-r630
* Resource action: scale1-bundle-2 monitor=30000 on c09-h07-r630
* Resource action: scale1-bundle-3 monitor=30000 on c09-h05-r630
* Resource action: scale1-bundle-4 monitor=30000 on c09-h06-r630
* Resource action: scale1-bundle-5 monitor=30000 on c09-h07-r630
* Pseudo action: scale1-bundle_running_0
* Resource action: dummy1:0 monitor=10000 on scale1-bundle-0
* Resource action: dummy1:1 monitor=10000 on scale1-bundle-1
* Resource action: dummy1:2 monitor=10000 on scale1-bundle-2
* Resource action: dummy1:3 monitor=10000 on scale1-bundle-3
* Resource action: dummy1:4 monitor=10000 on scale1-bundle-4
* Resource action: dummy1:5 monitor=10000 on scale1-bundle-5
Revised Cluster Status:
* Node List:
* Online: [ c09-h05-r630 c09-h06-r630 c09-h07-r630 ]
* RemoteOnline: [ c09-h08-r630 c09-h09-r630 c09-h10-r630 ]
- * GuestOnline: [ scale1-bundle-0@c09-h05-r630 scale1-bundle-1@c09-h06-r630 scale1-bundle-2@c09-h07-r630 scale1-bundle-3@c09-h05-r630 scale1-bundle-4@c09-h06-r630 scale1-bundle-5@c09-h07-r630 ]
+ * GuestOnline: [ scale1-bundle-0 scale1-bundle-1 scale1-bundle-2 scale1-bundle-3 scale1-bundle-4 scale1-bundle-5 ]
* Full List of Resources:
* c09-h08-r630 (ocf:pacemaker:remote): Started c09-h05-r630
* c09-h09-r630 (ocf:pacemaker:remote): Started c09-h06-r630
* c09-h10-r630 (ocf:pacemaker:remote): Started c09-h07-r630
* Container bundle set: scale1-bundle [beekhof:remote]:
* scale1-bundle-0 (ocf:pacemaker:Dummy): Started c09-h05-r630
* scale1-bundle-1 (ocf:pacemaker:Dummy): Started c09-h06-r630
* scale1-bundle-2 (ocf:pacemaker:Dummy): Started c09-h07-r630
* scale1-bundle-3 (ocf:pacemaker:Dummy): Started c09-h08-r630
* scale1-bundle-4 (ocf:pacemaker:Dummy): Started c09-h09-r630
* scale1-bundle-5 (ocf:pacemaker:Dummy): Started c09-h10-r630
diff --git a/cts/scheduler/summary/bundle-replicas-change.summary b/cts/scheduler/summary/bundle-replicas-change.summary
index c90a7bbcb9..5cc92f3ead 100644
--- a/cts/scheduler/summary/bundle-replicas-change.summary
+++ b/cts/scheduler/summary/bundle-replicas-change.summary
@@ -1,77 +1,77 @@
Current cluster status:
* Node List:
* Online: [ rh74-test ]
- * GuestOnline: [ httpd-bundle-0@rh74-test ]
+ * GuestOnline: [ httpd-bundle-0 ]
* Full List of Resources:
* Container bundle set: httpd-bundle [pcmktest:http] (unique):
* httpd-bundle-0 (192.168.20.188) (ocf:heartbeat:apache): Stopped rh74-test
* httpd-bundle-1 (192.168.20.189) (ocf:heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.20.190) (ocf:heartbeat:apache): Stopped
* httpd (ocf:heartbeat:apache): ORPHANED Started httpd-bundle-0
Transition Summary:
* Restart httpd-bundle-docker-0 ( rh74-test )
* Restart httpd-bundle-0 ( rh74-test ) due to required httpd-bundle-docker-0 start
* Start httpd:0 ( httpd-bundle-0 )
* Start httpd-bundle-ip-192.168.20.189 ( rh74-test )
* Start httpd-bundle-docker-1 ( rh74-test )
* Start httpd-bundle-1 ( rh74-test )
* Start httpd:1 ( httpd-bundle-1 )
* Start httpd-bundle-ip-192.168.20.190 ( rh74-test )
* Start httpd-bundle-docker-2 ( rh74-test )
* Start httpd-bundle-2 ( rh74-test )
* Start httpd:2 ( httpd-bundle-2 )
* Stop httpd ( httpd-bundle-0 ) due to node availability
Executing Cluster Transition:
* Resource action: httpd-bundle-ip-192.168.20.189 monitor on rh74-test
* Resource action: httpd-bundle-docker-1 monitor on rh74-test
* Resource action: httpd-bundle-ip-192.168.20.190 monitor on rh74-test
* Resource action: httpd-bundle-docker-2 monitor on rh74-test
* Resource action: httpd stop on httpd-bundle-0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: httpd-bundle-0 stop on rh74-test
* Resource action: httpd-bundle-ip-192.168.20.189 start on rh74-test
* Resource action: httpd-bundle-docker-1 start on rh74-test
* Resource action: httpd-bundle-1 monitor on rh74-test
* Resource action: httpd-bundle-ip-192.168.20.190 start on rh74-test
* Resource action: httpd-bundle-docker-2 start on rh74-test
* Resource action: httpd-bundle-2 monitor on rh74-test
* Resource action: httpd-bundle-docker-0 stop on rh74-test
* Resource action: httpd-bundle-docker-0 start on rh74-test
* Resource action: httpd-bundle-docker-0 monitor=60000 on rh74-test
* Resource action: httpd-bundle-0 start on rh74-test
* Resource action: httpd-bundle-0 monitor=30000 on rh74-test
* Resource action: httpd-bundle-ip-192.168.20.189 monitor=60000 on rh74-test
* Resource action: httpd-bundle-docker-1 monitor=60000 on rh74-test
* Resource action: httpd-bundle-1 start on rh74-test
* Resource action: httpd-bundle-ip-192.168.20.190 monitor=60000 on rh74-test
* Resource action: httpd-bundle-docker-2 monitor=60000 on rh74-test
* Resource action: httpd-bundle-2 start on rh74-test
* Resource action: httpd delete on httpd-bundle-0
* Pseudo action: httpd-bundle_stopped_0
* Resource action: httpd:0 monitor on httpd-bundle-0
* Pseudo action: httpd-bundle-clone_start_0
* Resource action: httpd-bundle-1 monitor=30000 on rh74-test
* Resource action: httpd-bundle-2 monitor=30000 on rh74-test
* Resource action: httpd:0 start on httpd-bundle-0
* Resource action: httpd:1 start on httpd-bundle-1
* Resource action: httpd:2 start on httpd-bundle-2
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
* Resource action: httpd:0 monitor=10000 on httpd-bundle-0
* Resource action: httpd:1 monitor=10000 on httpd-bundle-1
* Resource action: httpd:2 monitor=10000 on httpd-bundle-2
Revised Cluster Status:
* Node List:
* Online: [ rh74-test ]
- * GuestOnline: [ httpd-bundle-0@rh74-test httpd-bundle-1@rh74-test httpd-bundle-2@rh74-test ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
* Full List of Resources:
* Container bundle set: httpd-bundle [pcmktest:http] (unique):
* httpd-bundle-0 (192.168.20.188) (ocf:heartbeat:apache): Started rh74-test
* httpd-bundle-1 (192.168.20.189) (ocf:heartbeat:apache): Started rh74-test
* httpd-bundle-2 (192.168.20.190) (ocf:heartbeat:apache): Started rh74-test
diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary
index 00524c893d..7a708d2556 100644
--- a/cts/scheduler/summary/cancel-behind-moving-remote.summary
+++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary
@@ -1,211 +1,211 @@
Using the original execution date of: 2021-02-15 01:40:51Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ]
* OFFLINE: [ messaging-1 ]
* RemoteOnline: [ compute-0 compute-1 ]
- * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-1@controller-2 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Started controller-1
* compute-1 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
* Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2
* stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2
Transition Summary:
* Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
* Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
* Start ovn-dbs-bundle-podman-0 ( controller-2 )
* Start ovn-dbs-bundle-0 ( controller-2 )
* Start ovndb_servers:0 ( ovn-dbs-bundle-0 )
* Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 )
* Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 )
* Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start
* Start ip-172.17.1.87 ( controller-0 )
* Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 )
* Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0
* Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0
* Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2
* Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0
* Cluster action: clear_failcount for nova-evacuate on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400dc23e0 on database-2
* Resource action: stonith-fence_ipmilan-52540040bb56 stop on messaging-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-52540078fb07 on messaging-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400ea59b0 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400066e50 on messaging-2
* Resource action: stonith-fence_ipmilan-525400e1534e stop on database-1
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400e1534e on database-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2
* Pseudo action: ovn-dbs-bundle_stop_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: ovn-dbs-bundle-master_stop_0
* Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2
* Pseudo action: rabbitmq-bundle-clone_running_0
* Resource action: ovndb_servers stop on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_stopped_0
* Resource action: ovn-dbs-bundle-1 stop on controller-2
* Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0
* Resource action: ovn-dbs-bundle-podman-1 stop on controller-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
* Pseudo action: ovn-dbs-bundle_stopped_0
* Pseudo action: ovn-dbs-bundle_start_0
* Pseudo action: rabbitmq-bundle_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: ovn-dbs-bundle-master_start_0
* Resource action: ovn-dbs-bundle-podman-0 start on controller-2
* Resource action: ovn-dbs-bundle-0 start on controller-2
* Resource action: ovn-dbs-bundle-podman-1 start on controller-0
* Resource action: ovn-dbs-bundle-1 start on controller-0
* Resource action: ovndb_servers start on ovn-dbs-bundle-0
* Resource action: ovndb_servers start on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_running_0
* Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2
* Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2
* Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0
* Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0
* Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0
* Pseudo action: ovn-dbs-bundle_running_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle_promote_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle-master_promote_0
* Resource action: ovndb_servers promote on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_promoted_0
* Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: ovn-dbs-bundle_promoted_0
* Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0
* Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1
* Resource action: ip-172.17.1.87 start on controller-0
* Resource action: ip-172.17.1.87 monitor=10000 on controller-0
Using the original execution date of: 2021-02-15 01:40:51Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ]
* OFFLINE: [ messaging-1 ]
* RemoteOnline: [ compute-0 compute-1 ]
- * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-0@controller-2 ovn-dbs-bundle-1@controller-0 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Started controller-1
* compute-1 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
* Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2
* stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2
diff --git a/cts/scheduler/summary/clbz5007-promotable-colocation.summary b/cts/scheduler/summary/clbz5007-promotable-colocation.summary
index 98b2d75ca5..58348bc77f 100644
--- a/cts/scheduler/summary/clbz5007-promotable-colocation.summary
+++ b/cts/scheduler/summary/clbz5007-promotable-colocation.summary
@@ -1,31 +1,31 @@
Current cluster status:
* Node List:
* Online: [ fc16-builder fc16-builder2 ]
* Full List of Resources:
* Clone Set: MS_DUMMY [DUMMY] (promotable):
* Promoted: [ fc16-builder ]
* Unpromoted: [ fc16-builder2 ]
* UNPROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder
* PROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder2
Transition Summary:
* Move UNPROMOTED_IP ( fc16-builder -> fc16-builder2 )
- * Move PROMOTED_IP ( fc16-builder2 -> fc16-builder )
+ * Move PROMOTED_IP ( fc16-builder2 -> fc16-builder )
Executing Cluster Transition:
* Resource action: UNPROMOTED_IP stop on fc16-builder
* Resource action: PROMOTED_IP stop on fc16-builder2
* Resource action: UNPROMOTED_IP start on fc16-builder2
* Resource action: PROMOTED_IP start on fc16-builder
Revised Cluster Status:
* Node List:
* Online: [ fc16-builder fc16-builder2 ]
* Full List of Resources:
* Clone Set: MS_DUMMY [DUMMY] (promotable):
* Promoted: [ fc16-builder ]
* Unpromoted: [ fc16-builder2 ]
* UNPROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder2
* PROMOTED_IP (ocf:pacemaker:Dummy): Started fc16-builder
diff --git a/cts/scheduler/summary/colocation-influence.summary b/cts/scheduler/summary/colocation-influence.summary
index 3ea8b3f545..e240003d92 100644
--- a/cts/scheduler/summary/colocation-influence.summary
+++ b/cts/scheduler/summary/colocation-influence.summary
@@ -1,170 +1,170 @@
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ bundle10-0@rhel7-2 bundle10-1@rhel7-3 bundle11-0@rhel7-1 ]
+ * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc1b (ocf:pacemaker:Dummy): Started rhel7-2
* rsc2a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc2b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc3a (ocf:pacemaker:Dummy): Stopped
* rsc3b (ocf:pacemaker:Dummy): Stopped
* rsc4a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc4b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc5a (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group5a:
* rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group6a:
* rsc6a1 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc6a2 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc6a (ocf:pacemaker:Dummy): Started rhel7-2
* Resource Group: group7a:
* rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3
* rsc7a2 (ocf:pacemaker:Dummy): Started rhel7-3
* Clone Set: rsc8a-clone [rsc8a]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* Clone Set: rsc8b-clone [rsc8b]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* rsc9a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9c (ocf:pacemaker:Dummy): Started rhel7-4
* rsc10a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc11a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12b (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12c (ocf:pacemaker:Dummy): Started rhel7-1
* Container bundle set: bundle10 [pcmktest:http]:
* bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2
* bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3
* Container bundle set: bundle11 [pcmktest:http]:
* bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1
* bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped
* rsc13a (ocf:pacemaker:Dummy): Started rhel7-3
* Clone Set: rsc13b-clone [rsc13b] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ]
* Stopped: [ rhel7-5 ]
* rsc14b (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: rsc14a-clone [rsc14a] (promotable):
* Promoted: [ rhel7-4 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
* Stopped: [ rhel7-5 ]
Transition Summary:
* Move rsc1a ( rhel7-2 -> rhel7-3 )
* Move rsc1b ( rhel7-2 -> rhel7-3 )
* Stop rsc2a ( rhel7-4 ) due to node availability
* Start rsc3a ( rhel7-2 )
* Start rsc3b ( rhel7-2 )
* Stop rsc4a ( rhel7-3 ) due to node availability
* Stop rsc5a ( rhel7-1 ) due to node availability
* Stop rsc6a1 ( rhel7-2 ) due to node availability
* Stop rsc6a2 ( rhel7-2 ) due to node availability
* Stop rsc7a2 ( rhel7-3 ) due to node availability
* Stop rsc8a:1 ( rhel7-4 ) due to node availability
* Stop rsc9c ( rhel7-4 ) due to node availability
* Move rsc10a ( rhel7-2 -> rhel7-3 )
* Stop rsc12b ( rhel7-1 ) due to node availability
* Start bundle11-1 ( rhel7-5 ) due to unrunnable bundle11-docker-1 start (blocked)
* Start bundle11a:1 ( bundle11-1 ) due to unrunnable bundle11-docker-1 start (blocked)
* Stop rsc13a ( rhel7-3 ) due to node availability
* Stop rsc14a:1 ( Promoted rhel7-4 ) due to node availability
Executing Cluster Transition:
* Resource action: rsc1a stop on rhel7-2
* Resource action: rsc1b stop on rhel7-2
* Resource action: rsc2a stop on rhel7-4
* Resource action: rsc3a start on rhel7-2
* Resource action: rsc3b start on rhel7-2
* Resource action: rsc4a stop on rhel7-3
* Resource action: rsc5a stop on rhel7-1
* Pseudo action: group6a_stop_0
* Resource action: rsc6a2 stop on rhel7-2
* Pseudo action: group7a_stop_0
* Resource action: rsc7a2 stop on rhel7-3
* Pseudo action: rsc8a-clone_stop_0
* Resource action: rsc9c stop on rhel7-4
* Resource action: rsc10a stop on rhel7-2
* Resource action: rsc12b stop on rhel7-1
* Resource action: rsc13a stop on rhel7-3
* Pseudo action: rsc14a-clone_demote_0
* Pseudo action: bundle11_start_0
* Resource action: rsc1a start on rhel7-3
* Resource action: rsc1b start on rhel7-3
* Resource action: rsc3a monitor=10000 on rhel7-2
* Resource action: rsc3b monitor=10000 on rhel7-2
* Resource action: rsc6a1 stop on rhel7-2
* Pseudo action: group7a_stopped_0
* Resource action: rsc8a stop on rhel7-4
* Pseudo action: rsc8a-clone_stopped_0
* Resource action: rsc10a start on rhel7-3
* Pseudo action: bundle11-clone_start_0
* Resource action: rsc14a demote on rhel7-4
* Pseudo action: rsc14a-clone_demoted_0
* Pseudo action: rsc14a-clone_stop_0
* Resource action: rsc1a monitor=10000 on rhel7-3
* Resource action: rsc1b monitor=10000 on rhel7-3
* Pseudo action: group6a_stopped_0
* Resource action: rsc10a monitor=10000 on rhel7-3
* Pseudo action: bundle11-clone_running_0
* Resource action: rsc14a stop on rhel7-4
* Pseudo action: rsc14a-clone_stopped_0
* Pseudo action: bundle11_running_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ bundle10-0@rhel7-2 bundle10-1@rhel7-3 bundle11-0@rhel7-1 ]
+ * GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc1b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc2a (ocf:pacemaker:Dummy): Stopped
* rsc2b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc3a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc3b (ocf:pacemaker:Dummy): Started rhel7-2
* rsc4a (ocf:pacemaker:Dummy): Stopped
* rsc4b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc5a (ocf:pacemaker:Dummy): Stopped
* Resource Group: group5a:
* rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group6a:
* rsc6a1 (ocf:pacemaker:Dummy): Stopped
* rsc6a2 (ocf:pacemaker:Dummy): Stopped
* rsc6a (ocf:pacemaker:Dummy): Started rhel7-2
* Resource Group: group7a:
* rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3
* rsc7a2 (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc8a-clone [rsc8a]:
* Started: [ rhel7-1 rhel7-3 ]
* Stopped: [ rhel7-2 rhel7-4 rhel7-5 ]
* Clone Set: rsc8b-clone [rsc8b]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* rsc9a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9c (ocf:pacemaker:Dummy): Stopped
* rsc10a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc11a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12b (ocf:pacemaker:Dummy): Stopped
* rsc12c (ocf:pacemaker:Dummy): Started rhel7-1
* Container bundle set: bundle10 [pcmktest:http]:
* bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2
* bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3
* Container bundle set: bundle11 [pcmktest:http]:
* bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1
* bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped
* rsc13a (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc13b-clone [rsc13b] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ]
* Stopped: [ rhel7-5 ]
* rsc14b (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: rsc14a-clone [rsc14a] (promotable):
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
* Stopped: [ rhel7-4 rhel7-5 ]
diff --git a/cts/scheduler/summary/container-is-remote-node.summary b/cts/scheduler/summary/container-is-remote-node.summary
index 7310bf4ec7..c022e896f4 100644
--- a/cts/scheduler/summary/container-is-remote-node.summary
+++ b/cts/scheduler/summary/container-is-remote-node.summary
@@ -1,59 +1,59 @@
3 of 19 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ lama2 lama3 ]
- * GuestOnline: [ RNVM1@lama2 ]
+ * GuestOnline: [ RNVM1 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* Clone Set: dlm-clone [dlm]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] (disabled):
* Stopped (disabled): [ lama2 lama3 RNVM1 ]
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* Resource Group: RES1:
* FSdata1 (ocf:heartbeat:Filesystem): Started RNVM1
* RES1-IP (ocf:heartbeat:IPaddr2): Started RNVM1
* res-rsyslog (ocf:heartbeat:rsyslog.test): Started RNVM1
Transition Summary:
Executing Cluster Transition:
* Resource action: dlm monitor on RNVM1
* Resource action: clvmd monitor on RNVM1
* Resource action: gfs2-lv_1_1 monitor on RNVM1
* Resource action: gfs2-lv_1_2 monitor on RNVM1
Revised Cluster Status:
* Node List:
* Online: [ lama2 lama3 ]
- * GuestOnline: [ RNVM1@lama2 ]
+ * GuestOnline: [ RNVM1 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* Clone Set: dlm-clone [dlm]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] (disabled):
* Stopped (disabled): [ lama2 lama3 RNVM1 ]
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* Resource Group: RES1:
* FSdata1 (ocf:heartbeat:Filesystem): Started RNVM1
* RES1-IP (ocf:heartbeat:IPaddr2): Started RNVM1
* res-rsyslog (ocf:heartbeat:rsyslog.test): Started RNVM1
diff --git a/cts/scheduler/summary/guest-host-not-fenceable.summary b/cts/scheduler/summary/guest-host-not-fenceable.summary
index e17d21f0f2..9e3b5db405 100644
--- a/cts/scheduler/summary/guest-host-not-fenceable.summary
+++ b/cts/scheduler/summary/guest-host-not-fenceable.summary
@@ -1,91 +1,91 @@
Using the original execution date of: 2019-08-26 04:52:42Z
Current cluster status:
* Node List:
* Node node2: UNCLEAN (offline)
* Node node3: UNCLEAN (offline)
* Online: [ node1 ]
- * GuestOnline: [ galera-bundle-0@node1 rabbitmq-bundle-0@node1 ]
+ * GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started node1
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN)
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN)
* Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted node1
* galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN)
* galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN)
* stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN)
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( node1 ) due to no quorum
* Stop rabbitmq-bundle-0 ( node1 ) due to no quorum
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to no quorum
* Stop rabbitmq-bundle-docker-1 ( node2 ) due to node availability (blocked)
* Stop rabbitmq-bundle-1 ( node2 ) due to no quorum (blocked)
* Stop rabbitmq:1 ( rabbitmq-bundle-1 ) due to no quorum (blocked)
* Stop rabbitmq-bundle-docker-2 ( node3 ) due to node availability (blocked)
* Stop rabbitmq-bundle-2 ( node3 ) due to no quorum (blocked)
* Stop rabbitmq:2 ( rabbitmq-bundle-2 ) due to no quorum (blocked)
* Stop galera-bundle-docker-0 ( node1 ) due to no quorum
* Stop galera-bundle-0 ( node1 ) due to no quorum
* Stop galera:0 ( Promoted galera-bundle-0 ) due to no quorum
* Stop galera-bundle-docker-1 ( node2 ) due to node availability (blocked)
* Stop galera-bundle-1 ( node2 ) due to no quorum (blocked)
* Stop galera:1 ( Promoted galera-bundle-1 ) due to no quorum (blocked)
* Stop galera-bundle-docker-2 ( node3 ) due to node availability (blocked)
* Stop galera-bundle-2 ( node3 ) due to no quorum (blocked)
* Stop galera:2 ( Promoted galera-bundle-2 ) due to no quorum (blocked)
* Stop stonith-fence_ipmilan-node1 ( node2 ) due to node availability (blocked)
* Stop stonith-fence_ipmilan-node3 ( node2 ) due to no quorum (blocked)
* Stop stonith-fence_ipmilan-node2 ( node3 ) due to no quorum (blocked)
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: rabbitmq-bundle_stop_0
* Resource action: rabbitmq notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Pseudo action: galera-bundle-master_demote_0
* Resource action: rabbitmq stop on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Resource action: rabbitmq-bundle-0 stop on node1
* Resource action: rabbitmq-bundle-0 cancel=60000 on node1
* Resource action: galera demote on galera-bundle-0
* Pseudo action: galera-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Resource action: rabbitmq-bundle-docker-0 stop on node1
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Resource action: galera stop on galera-bundle-0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: galera-bundle-0 stop on node1
* Resource action: galera-bundle-0 cancel=60000 on node1
* Pseudo action: rabbitmq-bundle_stopped_0
* Resource action: galera-bundle-docker-0 stop on node1
* Pseudo action: galera-bundle_stopped_0
Using the original execution date of: 2019-08-26 04:52:42Z
Revised Cluster Status:
* Node List:
* Node node2: UNCLEAN (offline)
* Node node3: UNCLEAN (offline)
* Online: [ node1 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN)
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN)
* Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN)
* galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN)
* stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN)
diff --git a/cts/scheduler/summary/guest-node-cleanup.summary b/cts/scheduler/summary/guest-node-cleanup.summary
index 4298619820..f68fb4fa44 100644
--- a/cts/scheduler/summary/guest-node-cleanup.summary
+++ b/cts/scheduler/summary/guest-node-cleanup.summary
@@ -1,55 +1,55 @@
Using the original execution date of: 2018-10-15 16:02:04Z
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ lxc2@rhel7-1 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-2
* FencingPass (stonith:fence_dummy): Started rhel7-3
* container1 (ocf:heartbeat:VirtualDomain): FAILED
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Unpromoted: [ lxc2 ]
* Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
Transition Summary:
* Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
* Start container1 ( rhel7-1 )
* Recover lxc-ms:1 ( Promoted lxc1 )
* Restart lxc1 ( rhel7-1 ) due to required container1 start
Executing Cluster Transition:
* Resource action: container1 monitor on rhel7-1
* Pseudo action: lxc-ms-master_demote_0
* Resource action: lxc1 stop on rhel7-1
* Pseudo action: stonith-lxc1-reboot on lxc1
* Resource action: container1 start on rhel7-1
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc1 start on rhel7-1
* Resource action: lxc1 monitor=30000 on rhel7-1
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc-ms start on lxc1
* Pseudo action: lxc-ms-master_running_0
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Using the original execution date of: 2018-10-15 16:02:04Z
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ lxc1@rhel7-1 lxc2@rhel7-1 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-2
* FencingPass (stonith:fence_dummy): Started rhel7-3
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc1 ]
* Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/guest-node-host-dies.summary b/cts/scheduler/summary/guest-node-host-dies.summary
index b0286b2846..84074c1f0a 100644
--- a/cts/scheduler/summary/guest-node-host-dies.summary
+++ b/cts/scheduler/summary/guest-node-host-dies.summary
@@ -1,82 +1,82 @@
Current cluster status:
* Node List:
* Node rhel7-1: UNCLEAN (offline)
* Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-4
* rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1 (UNCLEAN)
* container1 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN)
* container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN)
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
Transition Summary:
* Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
* Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
* Fence (reboot) rhel7-1 'rsc_rhel7-1 is thought to be active there'
* Restart Fencing ( rhel7-4 ) due to resource definition change
* Move rsc_rhel7-1 ( rhel7-1 -> rhel7-5 )
* Recover container1 ( rhel7-1 -> rhel7-2 )
* Recover container2 ( rhel7-1 -> rhel7-3 )
* Recover lxc-ms:0 ( Promoted lxc1 )
* Recover lxc-ms:1 ( Unpromoted lxc2 )
* Move lxc1 ( rhel7-1 -> rhel7-2 )
* Move lxc2 ( rhel7-1 -> rhel7-3 )
Executing Cluster Transition:
* Resource action: Fencing stop on rhel7-4
* Pseudo action: lxc-ms-master_demote_0
* Pseudo action: lxc1_stop_0
* Resource action: lxc1 monitor on rhel7-5
* Resource action: lxc1 monitor on rhel7-4
* Resource action: lxc1 monitor on rhel7-3
* Pseudo action: lxc2_stop_0
* Resource action: lxc2 monitor on rhel7-5
* Resource action: lxc2 monitor on rhel7-4
* Resource action: lxc2 monitor on rhel7-2
* Fencing rhel7-1 (reboot)
* Pseudo action: rsc_rhel7-1_stop_0
* Pseudo action: container1_stop_0
* Pseudo action: container2_stop_0
* Pseudo action: stonith-lxc2-reboot on lxc2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Resource action: Fencing start on rhel7-4
* Resource action: Fencing monitor=120000 on rhel7-4
* Resource action: rsc_rhel7-1 start on rhel7-5
* Resource action: container1 start on rhel7-2
* Resource action: container2 start on rhel7-3
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc1 start on rhel7-2
* Resource action: lxc2 start on rhel7-3
* Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc1 monitor=30000 on rhel7-2
* Resource action: lxc2 monitor=30000 on rhel7-3
* Resource action: lxc-ms start on lxc1
* Resource action: lxc-ms start on lxc2
* Pseudo action: lxc-ms-master_running_0
* Resource action: lxc-ms monitor=10000 on lxc2
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* OFFLINE: [ rhel7-1 ]
- * GuestOnline: [ lxc1@rhel7-2 lxc2@rhel7-3 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-4
* rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-5
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc1 ]
* Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/nested-remote-recovery.summary b/cts/scheduler/summary/nested-remote-recovery.summary
index 0274d2d876..fd3ccd7613 100644
--- a/cts/scheduler/summary/nested-remote-recovery.summary
+++ b/cts/scheduler/summary/nested-remote-recovery.summary
@@ -1,131 +1,131 @@
Using the original execution date of: 2018-09-11 21:23:25Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* database-0 (ocf:pacemaker:remote): Started controller-0
* database-1 (ocf:pacemaker:remote): Started controller-1
* database-2 (ocf:pacemaker:remote): Started controller-2
* messaging-0 (ocf:pacemaker:remote): Started controller-2
* messaging-1 (ocf:pacemaker:remote): Started controller-1
* messaging-2 (ocf:pacemaker:remote): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.18 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.12 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.18 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.14 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
* stonith-fence_ipmilan-5254005f9a33 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-52540098c9ff (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254000203a2 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254003296a5 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-52540066e27e (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-52540065418e (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400aab9d9 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400a16c0d (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1
Transition Summary:
* Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
* Recover galera-bundle-docker-0 ( database-0 )
* Recover galera-bundle-0 ( controller-0 )
* Recover galera:0 ( Promoted galera-bundle-0 )
Executing Cluster Transition:
* Resource action: galera-bundle-0 stop on controller-0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: galera-bundle-master_demote_0
* Pseudo action: galera_demote_0
* Pseudo action: galera-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Resource action: galera-bundle-docker-0 stop on database-0
* Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: galera_stop_0
* Pseudo action: galera-bundle-master_stopped_0
* Pseudo action: galera-bundle_stopped_0
* Pseudo action: galera-bundle_start_0
* Pseudo action: galera-bundle-master_start_0
* Resource action: galera-bundle-docker-0 start on database-0
* Resource action: galera-bundle-docker-0 monitor=60000 on database-0
* Resource action: galera-bundle-0 start on controller-0
* Resource action: galera-bundle-0 monitor=30000 on controller-0
* Resource action: galera start on galera-bundle-0
* Pseudo action: galera-bundle-master_running_0
* Pseudo action: galera-bundle_running_0
* Pseudo action: galera-bundle_promote_0
* Pseudo action: galera-bundle-master_promote_0
* Resource action: galera promote on galera-bundle-0
* Pseudo action: galera-bundle-master_promoted_0
* Pseudo action: galera-bundle_promoted_0
* Resource action: galera monitor=10000 on galera-bundle-0
Using the original execution date of: 2018-09-11 21:23:25Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* database-0 (ocf:pacemaker:remote): Started controller-0
* database-1 (ocf:pacemaker:remote): Started controller-1
* database-2 (ocf:pacemaker:remote): Started controller-2
* messaging-0 (ocf:pacemaker:remote): Started controller-2
* messaging-1 (ocf:pacemaker:remote): Started controller-1
* messaging-2 (ocf:pacemaker:remote): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.18 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.12 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.18 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.14 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
* stonith-fence_ipmilan-5254005f9a33 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-52540098c9ff (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254000203a2 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254003296a5 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-52540066e27e (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-52540065418e (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400aab9d9 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400a16c0d (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1
diff --git a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
index 493b50c856..c06f8f087d 100644
--- a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
+++ b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
@@ -1,103 +1,103 @@
Using the original execution date of: 2020-05-14 10:49:31Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
- * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 ovn-dbs-bundle-0@controller-0 ovn-dbs-bundle-1@controller-1 ovn-dbs-bundle-2@controller-2 rabbitmq-bundle-0@controller-0 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0
Transition Summary:
* Stop ovn-dbs-bundle-podman-0 ( controller-0 ) due to node availability
* Stop ovn-dbs-bundle-0 ( controller-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start
* Stop ovndb_servers:0 ( Unpromoted ovn-dbs-bundle-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start
* Promote ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 )
Executing Cluster Transition:
* Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0
* Pseudo action: ovn-dbs-bundle_stop_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: ovn-dbs-bundle-master_stop_0
* Resource action: ovndb_servers stop on ovn-dbs-bundle-0
* Pseudo action: ovn-dbs-bundle-master_stopped_0
* Resource action: ovn-dbs-bundle-0 stop on controller-0
* Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0
* Resource action: ovn-dbs-bundle-podman-0 stop on controller-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
* Pseudo action: ovn-dbs-bundle_stopped_0
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: ovn-dbs-bundle-master_start_0
* Pseudo action: ovn-dbs-bundle-master_running_0
* Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0
* Pseudo action: ovn-dbs-bundle_running_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle_promote_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle-master_promote_0
* Resource action: ovndb_servers promote on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_promoted_0
* Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: ovn-dbs-bundle_promoted_0
* Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1
Using the original execution date of: 2020-05-14 10:49:31Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
- * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 ovn-dbs-bundle-1@controller-1 ovn-dbs-bundle-2@controller-2 rabbitmq-bundle-0@controller-0 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-1
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0
diff --git a/cts/scheduler/summary/notifs-for-unrunnable.summary b/cts/scheduler/summary/notifs-for-unrunnable.summary
index ecd65be550..a9503b46b2 100644
--- a/cts/scheduler/summary/notifs-for-unrunnable.summary
+++ b/cts/scheduler/summary/notifs-for-unrunnable.summary
@@ -1,99 +1,99 @@
Using the original execution date of: 2018-02-13 23:40:47Z
Current cluster status:
* Node List:
* Online: [ controller-1 controller-2 ]
* OFFLINE: [ controller-0 ]
- * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Promoted controller-2
* ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.15 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2
* stonith-fence_ipmilan-525400fec0c8 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254002ff217 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254008f971a (stonith:fence_ipmilan): Started controller-1
Transition Summary:
* Start rabbitmq-bundle-0 ( controller-1 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked)
* Start rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked)
* Start galera-bundle-0 ( controller-2 ) due to unrunnable galera-bundle-docker-0 start (blocked)
* Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked)
* Start redis-bundle-0 ( controller-1 ) due to unrunnable redis-bundle-docker-0 start (blocked)
* Start redis:0 ( redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start (blocked)
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Pseudo action: redis-bundle_start_0
* Pseudo action: galera-bundle_start_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Pseudo action: galera-bundle-master_start_0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Pseudo action: galera-bundle-master_running_0
* Pseudo action: redis-bundle-master_running_0
* Pseudo action: galera-bundle_running_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: redis-bundle-master_post_notify_running_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Pseudo action: rabbitmq-bundle_running_0
Using the original execution date of: 2018-02-13 23:40:47Z
Revised Cluster Status:
* Node List:
* Online: [ controller-1 controller-2 ]
* OFFLINE: [ controller-0 ]
- * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ]
+ * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Stopped
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Promoted controller-2
* ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.15 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2
* stonith-fence_ipmilan-525400fec0c8 (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-5254002ff217 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-5254008f971a (stonith:fence_ipmilan): Started controller-1
diff --git a/cts/scheduler/summary/notify-behind-stopping-remote.summary b/cts/scheduler/summary/notify-behind-stopping-remote.summary
index f5d9162029..257e445274 100644
--- a/cts/scheduler/summary/notify-behind-stopping-remote.summary
+++ b/cts/scheduler/summary/notify-behind-stopping-remote.summary
@@ -1,64 +1,64 @@
Using the original execution date of: 2018-11-22 20:36:07Z
Current cluster status:
* Node List:
* Online: [ ra1 ra2 ra3 ]
- * GuestOnline: [ redis-bundle-0@ra1 redis-bundle-1@ra2 redis-bundle-2@ra3 ]
+ * GuestOnline: [ redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]:
* redis-bundle-0 (ocf:heartbeat:redis): Unpromoted ra1
* redis-bundle-1 (ocf:heartbeat:redis): Stopped ra2
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted ra3
Transition Summary:
* Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 )
* Stop redis-bundle-docker-1 ( ra2 ) due to node availability
* Stop redis-bundle-1 ( ra2 ) due to unrunnable redis-bundle-docker-1 start
* Start redis:1 ( redis-bundle-1 ) due to unrunnable redis-bundle-docker-1 start (blocked)
Executing Cluster Transition:
* Resource action: redis cancel=45000 on redis-bundle-0
* Resource action: redis cancel=60000 on redis-bundle-0
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Resource action: redis-bundle-0 monitor=30000 on ra1
* Resource action: redis-bundle-0 cancel=60000 on ra1
* Resource action: redis-bundle-1 stop on ra2
* Resource action: redis-bundle-1 cancel=60000 on ra2
* Resource action: redis-bundle-2 monitor=30000 on ra3
* Resource action: redis-bundle-2 cancel=60000 on ra3
* Pseudo action: redis-bundle_stop_0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Resource action: redis-bundle-docker-1 stop on ra2
* Pseudo action: redis-bundle_stopped_0
* Pseudo action: redis-bundle_start_0
* Pseudo action: redis-bundle-master_start_0
* Pseudo action: redis-bundle-master_running_0
* Pseudo action: redis-bundle-master_post_notify_running_0
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Pseudo action: redis-bundle-master_pre_notify_promote_0
* Pseudo action: redis-bundle_promote_0
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Resource action: redis promote on redis-bundle-0
* Pseudo action: redis-bundle-master_promoted_0
* Pseudo action: redis-bundle-master_post_notify_promoted_0
* Resource action: redis notify on redis-bundle-0
* Resource action: redis notify on redis-bundle-2
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Resource action: redis monitor=20000 on redis-bundle-0
Using the original execution date of: 2018-11-22 20:36:07Z
Revised Cluster Status:
* Node List:
* Online: [ ra1 ra2 ra3 ]
- * GuestOnline: [ redis-bundle-0@ra1 redis-bundle-2@ra3 ]
+ * GuestOnline: [ redis-bundle-0 redis-bundle-2 ]
* Full List of Resources:
* Container bundle set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted ra1
* redis-bundle-1 (ocf:heartbeat:redis): Stopped
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted ra3
diff --git a/cts/scheduler/summary/on_fail_demote1.summary b/cts/scheduler/summary/on_fail_demote1.summary
index ee23f4dc59..a386da096a 100644
--- a/cts/scheduler/summary/on_fail_demote1.summary
+++ b/cts/scheduler/summary/on_fail_demote1.summary
@@ -1,88 +1,88 @@
Using the original execution date of: 2020-06-16 19:23:21Z
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* RemoteOnline: [ remote-rhel7-2 ]
- * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-4 ]
+ * GuestOnline: [ lxc1 lxc2 stateful-bundle-0 stateful-bundle-1 stateful-bundle-2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-4
* Clone Set: rsc1-clone [rsc1] (promotable):
* rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4
* Unpromoted: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ]
* Clone Set: rsc2-master [rsc2] (promotable):
* rsc2 (ocf:pacemaker:Stateful): FAILED Promoted remote-rhel7-2
* Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc2
* Unpromoted: [ lxc1 ]
* Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* Container bundle set: stateful-bundle [pcmktest:http]:
* stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5
* stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1
* stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-4
Transition Summary:
* Re-promote rsc1:0 ( Promoted rhel7-4 )
* Re-promote rsc2:4 ( Promoted remote-rhel7-2 )
* Re-promote lxc-ms:0 ( Promoted lxc2 )
* Re-promote bundled:0 ( Promoted stateful-bundle-0 )
Executing Cluster Transition:
* Pseudo action: rsc1-clone_demote_0
* Pseudo action: rsc2-master_demote_0
* Pseudo action: lxc-ms-master_demote_0
* Pseudo action: stateful-bundle_demote_0
* Resource action: rsc1 demote on rhel7-4
* Pseudo action: rsc1-clone_demoted_0
* Pseudo action: rsc1-clone_promote_0
* Resource action: rsc2 demote on remote-rhel7-2
* Pseudo action: rsc2-master_demoted_0
* Pseudo action: rsc2-master_promote_0
* Resource action: lxc-ms demote on lxc2
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_promote_0
* Pseudo action: stateful-bundle-master_demote_0
* Resource action: rsc1 promote on rhel7-4
* Pseudo action: rsc1-clone_promoted_0
* Resource action: rsc2 promote on remote-rhel7-2
* Pseudo action: rsc2-master_promoted_0
* Resource action: lxc-ms promote on lxc2
* Pseudo action: lxc-ms-master_promoted_0
* Resource action: bundled demote on stateful-bundle-0
* Pseudo action: stateful-bundle-master_demoted_0
* Pseudo action: stateful-bundle_demoted_0
* Pseudo action: stateful-bundle_promote_0
* Pseudo action: stateful-bundle-master_promote_0
* Resource action: bundled promote on stateful-bundle-0
* Pseudo action: stateful-bundle-master_promoted_0
* Pseudo action: stateful-bundle_promoted_0
Using the original execution date of: 2020-06-16 19:23:21Z
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* RemoteOnline: [ remote-rhel7-2 ]
- * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-4 ]
+ * GuestOnline: [ lxc1 lxc2 stateful-bundle-0 stateful-bundle-1 stateful-bundle-2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-4
* Clone Set: rsc1-clone [rsc1] (promotable):
* Promoted: [ rhel7-4 ]
* Unpromoted: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ]
* Clone Set: rsc2-master [rsc2] (promotable):
* Promoted: [ remote-rhel7-2 ]
* Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc2 ]
* Unpromoted: [ lxc1 ]
* Container bundle set: stateful-bundle [pcmktest:http]:
* stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5
* stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1
* stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-4
diff --git a/cts/scheduler/summary/on_fail_demote4.summary b/cts/scheduler/summary/on_fail_demote4.summary
index 57eea35753..3082651198 100644
--- a/cts/scheduler/summary/on_fail_demote4.summary
+++ b/cts/scheduler/summary/on_fail_demote4.summary
@@ -1,189 +1,189 @@
Using the original execution date of: 2020-06-16 19:23:21Z
Current cluster status:
* Node List:
* RemoteNode remote-rhel7-2: UNCLEAN (offline)
* Node rhel7-4: UNCLEAN (offline)
* Online: [ rhel7-1 rhel7-3 rhel7-5 ]
- * GuestOnline: [ lxc1@rhel7-3 stateful-bundle-1@rhel7-1 ]
+ * GuestOnline: [ lxc1 stateful-bundle-1 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN)
* Clone Set: rsc1-clone [rsc1] (promotable):
* rsc1 (ocf:pacemaker:Stateful): Promoted rhel7-4 (UNCLEAN)
* rsc1 (ocf:pacemaker:Stateful): Unpromoted remote-rhel7-2 (UNCLEAN)
* Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ]
* Clone Set: rsc2-master [rsc2] (promotable):
* rsc2 (ocf:pacemaker:Stateful): Unpromoted rhel7-4 (UNCLEAN)
* rsc2 (ocf:pacemaker:Stateful): Promoted remote-rhel7-2 (UNCLEAN)
* Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ]
* remote-rhel7-2 (ocf:pacemaker:remote): FAILED rhel7-1
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-3
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Unpromoted: [ lxc1 ]
* Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
* Container bundle set: stateful-bundle [pcmktest:http]:
* stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5
* stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1
* stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN)
Transition Summary:
* Fence (reboot) stateful-bundle-2 (resource: stateful-bundle-docker-2) 'guest is unclean'
* Fence (reboot) stateful-bundle-0 (resource: stateful-bundle-docker-0) 'guest is unclean'
* Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
* Fence (reboot) remote-rhel7-2 'remote connection is unrecoverable'
* Fence (reboot) rhel7-4 'peer is no longer part of the cluster'
* Move Fencing ( rhel7-4 -> rhel7-5 )
* Stop rsc1:0 ( Promoted rhel7-4 ) due to node availability
* Promote rsc1:1 ( Unpromoted -> Promoted rhel7-3 )
* Stop rsc1:4 ( Unpromoted remote-rhel7-2 ) due to node availability
* Recover rsc1:5 ( Unpromoted lxc2 )
* Stop rsc2:0 ( Unpromoted rhel7-4 ) due to node availability
* Promote rsc2:1 ( Unpromoted -> Promoted rhel7-3 )
* Stop rsc2:4 ( Promoted remote-rhel7-2 ) due to node availability
* Recover rsc2:5 ( Unpromoted lxc2 )
* Recover remote-rhel7-2 ( rhel7-1 )
* Recover container2 ( rhel7-3 )
* Recover lxc-ms:0 ( Promoted lxc2 )
* Recover stateful-bundle-docker-0 ( rhel7-5 )
* Restart stateful-bundle-0 ( rhel7-5 ) due to required stateful-bundle-docker-0 start
* Recover bundled:0 ( Promoted stateful-bundle-0 )
* Move stateful-bundle-ip-192.168.122.133 ( rhel7-4 -> rhel7-3 )
* Recover stateful-bundle-docker-2 ( rhel7-4 -> rhel7-3 )
* Move stateful-bundle-2 ( rhel7-4 -> rhel7-3 )
* Recover bundled:2 ( Unpromoted stateful-bundle-2 )
* Restart lxc2 ( rhel7-3 ) due to required container2 start
Executing Cluster Transition:
* Pseudo action: Fencing_stop_0
* Resource action: rsc1 cancel=11000 on rhel7-3
* Pseudo action: rsc1-clone_demote_0
* Resource action: rsc2 cancel=11000 on rhel7-3
* Pseudo action: rsc2-master_demote_0
* Pseudo action: lxc-ms-master_demote_0
* Resource action: stateful-bundle-0 stop on rhel7-5
* Pseudo action: stateful-bundle-2_stop_0
* Resource action: lxc2 stop on rhel7-3
* Pseudo action: stateful-bundle_demote_0
* Fencing remote-rhel7-2 (reboot)
* Fencing rhel7-4 (reboot)
* Pseudo action: rsc1_demote_0
* Pseudo action: rsc1-clone_demoted_0
* Pseudo action: rsc2_demote_0
* Pseudo action: rsc2-master_demoted_0
* Resource action: container2 stop on rhel7-3
* Pseudo action: stateful-bundle-master_demote_0
* Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2
* Pseudo action: stonith-lxc2-reboot on lxc2
* Resource action: Fencing start on rhel7-5
* Pseudo action: rsc1-clone_stop_0
* Pseudo action: rsc2-master_stop_0
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Pseudo action: bundled_demote_0
* Pseudo action: stateful-bundle-master_demoted_0
* Pseudo action: stateful-bundle_demoted_0
* Pseudo action: stateful-bundle_stop_0
* Resource action: Fencing monitor=120000 on rhel7-5
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc1-clone_stopped_0
* Pseudo action: rsc1-clone_start_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc2-master_stopped_0
* Pseudo action: rsc2-master_start_0
* Resource action: remote-rhel7-2 stop on rhel7-1
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: stateful-bundle-docker-0 stop on rhel7-5
* Pseudo action: stateful-bundle-docker-2_stop_0
* Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0
* Resource action: remote-rhel7-2 start on rhel7-1
* Resource action: remote-rhel7-2 monitor=60000 on rhel7-1
* Resource action: container2 start on rhel7-3
* Resource action: container2 monitor=20000 on rhel7-3
* Pseudo action: stateful-bundle-master_stop_0
* Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0
* Resource action: lxc2 start on rhel7-3
* Resource action: lxc2 monitor=30000 on rhel7-3
* Resource action: rsc1 start on lxc2
* Pseudo action: rsc1-clone_running_0
* Resource action: rsc2 start on lxc2
* Pseudo action: rsc2-master_running_0
* Resource action: lxc-ms start on lxc2
* Pseudo action: lxc-ms-master_running_0
* Pseudo action: bundled_stop_0
* Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3
* Resource action: rsc1 monitor=11000 on lxc2
* Pseudo action: rsc1-clone_promote_0
* Resource action: rsc2 monitor=11000 on lxc2
* Pseudo action: rsc2-master_promote_0
* Pseudo action: lxc-ms-master_promote_0
* Pseudo action: bundled_stop_0
* Pseudo action: stateful-bundle-master_stopped_0
* Resource action: stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3
* Pseudo action: stateful-bundle_stopped_0
* Pseudo action: stateful-bundle_start_0
* Resource action: rsc1 promote on rhel7-3
* Pseudo action: rsc1-clone_promoted_0
* Resource action: rsc2 promote on rhel7-3
* Pseudo action: rsc2-master_promoted_0
* Resource action: lxc-ms promote on lxc2
* Pseudo action: lxc-ms-master_promoted_0
* Pseudo action: stateful-bundle-master_start_0
* Resource action: stateful-bundle-docker-0 start on rhel7-5
* Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5
* Resource action: stateful-bundle-0 start on rhel7-5
* Resource action: stateful-bundle-0 monitor=30000 on rhel7-5
* Resource action: stateful-bundle-docker-2 start on rhel7-3
* Resource action: stateful-bundle-2 start on rhel7-3
* Resource action: rsc1 monitor=10000 on rhel7-3
* Resource action: rsc2 monitor=10000 on rhel7-3
* Resource action: lxc-ms monitor=10000 on lxc2
* Resource action: bundled start on stateful-bundle-0
* Resource action: bundled start on stateful-bundle-2
* Pseudo action: stateful-bundle-master_running_0
* Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3
* Resource action: stateful-bundle-2 monitor=30000 on rhel7-3
* Pseudo action: stateful-bundle_running_0
* Resource action: bundled monitor=11000 on stateful-bundle-2
* Pseudo action: stateful-bundle_promote_0
* Pseudo action: stateful-bundle-master_promote_0
* Resource action: bundled promote on stateful-bundle-0
* Pseudo action: stateful-bundle-master_promoted_0
* Pseudo action: stateful-bundle_promoted_0
* Resource action: bundled monitor=10000 on stateful-bundle-0
Using the original execution date of: 2020-06-16 19:23:21Z
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-3 rhel7-5 ]
* OFFLINE: [ rhel7-4 ]
* RemoteOnline: [ remote-rhel7-2 ]
- * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-3 ]
+ * GuestOnline: [ lxc1 lxc2 stateful-bundle-0 stateful-bundle-1 stateful-bundle-2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-5
* Clone Set: rsc1-clone [rsc1] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ]
* Stopped: [ remote-rhel7-2 rhel7-4 ]
* Clone Set: rsc2-master [rsc2] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ]
* Stopped: [ remote-rhel7-2 rhel7-4 ]
* remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc2 ]
* Unpromoted: [ lxc1 ]
* Container bundle set: stateful-bundle [pcmktest:http]:
* stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5
* stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1
* stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-3
diff --git a/cts/scheduler/summary/order-expired-failure.summary b/cts/scheduler/summary/order-expired-failure.summary
index ca2e1d3062..7ec061757c 100644
--- a/cts/scheduler/summary/order-expired-failure.summary
+++ b/cts/scheduler/summary/order-expired-failure.summary
@@ -1,112 +1,112 @@
Using the original execution date of: 2018-04-09 07:55:35Z
Current cluster status:
* Node List:
* RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
* overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
* Started: [ overcloud-novacompute-0 ]
* Stopped: [ controller-0 controller-1 controller-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
* stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
Transition Summary:
* Fence (reboot) overcloud-novacompute-1 'remote connection is unrecoverable'
* Stop overcloud-novacompute-1 ( controller-1 ) due to node availability
* Start ip-10.0.0.110 ( controller-1 )
* Recover stonith-fence_compute-fence-nova ( controller-2 )
* Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability
Executing Cluster Transition:
* Resource action: overcloud-novacompute-1 stop on controller-1
* Resource action: stonith-fence_compute-fence-nova stop on controller-2
* Fencing overcloud-novacompute-1 (reboot)
* Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1
* Resource action: ip-10.0.0.110 start on controller-1
* Resource action: stonith-fence_compute-fence-nova start on controller-2
* Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2
* Pseudo action: compute-unfence-trigger-clone_stop_0
* Resource action: ip-10.0.0.110 monitor=10000 on controller-1
* Pseudo action: compute-unfence-trigger_stop_0
* Pseudo action: compute-unfence-trigger-clone_stopped_0
Using the original execution date of: 2018-04-09 07:55:35Z
Revised Cluster Status:
* Node List:
* RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
* overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ overcloud-novacompute-0 ]
* Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
* stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
diff --git a/cts/scheduler/summary/priority-fencing-delay.summary b/cts/scheduler/summary/priority-fencing-delay.summary
index 1fc4246fc1..ce5aff2562 100644
--- a/cts/scheduler/summary/priority-fencing-delay.summary
+++ b/cts/scheduler/summary/priority-fencing-delay.summary
@@ -1,104 +1,104 @@
Current cluster status:
* Node List:
* Node kiff-01: UNCLEAN (offline)
* Online: [ kiff-02 ]
- * GuestOnline: [ lxc-01_kiff-02@kiff-02 lxc-02_kiff-02@kiff-02 ]
+ * GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ]
* Full List of Resources:
* vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
* Clone Set: dlm-clone [dlm]:
* dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
* Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean'
* Fence (reboot) kiff-01 'peer is no longer part of the cluster'
* Recover vm-fs ( lxc-01_kiff-01 )
* Move fence-kiff-02 ( kiff-01 -> kiff-02 )
* Stop dlm:0 ( kiff-01 ) due to node availability
* Stop clvmd:0 ( kiff-01 ) due to node availability
* Stop shared0:0 ( kiff-01 ) due to node availability
* Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
Executing Cluster Transition:
* Resource action: vm-fs monitor on lxc-02_kiff-02
* Resource action: vm-fs monitor on lxc-01_kiff-02
* Pseudo action: fence-kiff-02_stop_0
* Resource action: dlm monitor on lxc-02_kiff-02
* Resource action: dlm monitor on lxc-01_kiff-02
* Resource action: clvmd monitor on lxc-02_kiff-02
* Resource action: clvmd monitor on lxc-01_kiff-02
* Resource action: shared0 monitor on lxc-02_kiff-02
* Resource action: shared0 monitor on lxc-01_kiff-02
* Pseudo action: lxc-01_kiff-01_stop_0
* Pseudo action: lxc-02_kiff-01_stop_0
* Fencing kiff-01 (reboot)
* Pseudo action: R-lxc-01_kiff-01_stop_0
* Pseudo action: R-lxc-02_kiff-01_stop_0
* Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
* Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
* Pseudo action: vm-fs_stop_0
* Resource action: fence-kiff-02 start on kiff-02
* Pseudo action: shared0-clone_stop_0
* Resource action: R-lxc-01_kiff-01 start on kiff-02
* Resource action: R-lxc-02_kiff-01 start on kiff-02
* Resource action: lxc-01_kiff-01 start on kiff-02
* Resource action: lxc-02_kiff-01 start on kiff-02
* Resource action: vm-fs start on lxc-01_kiff-01
* Resource action: fence-kiff-02 monitor=60000 on kiff-02
* Pseudo action: shared0_stop_0
* Pseudo action: shared0-clone_stopped_0
* Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
* Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
* Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
* Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
* Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
* Pseudo action: clvmd-clone_stop_0
* Pseudo action: clvmd_stop_0
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Pseudo action: dlm_stop_0
* Pseudo action: dlm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ kiff-02 ]
* OFFLINE: [ kiff-01 ]
- * GuestOnline: [ lxc-01_kiff-01@kiff-02 lxc-01_kiff-02@kiff-02 lxc-02_kiff-01@kiff-02 lxc-02_kiff-02@kiff-02 ]
+ * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Full List of Resources:
* vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
* Clone Set: dlm-clone [dlm]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
diff --git a/cts/scheduler/summary/remote-connection-shutdown.summary b/cts/scheduler/summary/remote-connection-shutdown.summary
index 513df3a594..b8ea5be046 100644
--- a/cts/scheduler/summary/remote-connection-shutdown.summary
+++ b/cts/scheduler/summary/remote-connection-shutdown.summary
@@ -1,162 +1,162 @@
Using the original execution date of: 2020-11-17 07:03:16Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* RemoteOnline: [ compute-0 compute-1 ]
- * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-0@controller-2 ovn-dbs-bundle-1@controller-0 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-1@messaging-1 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Started controller-0
* compute-1 (ocf:pacemaker:remote): Started controller-1
* Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Promoted controller-2
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-0
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ip-172.17.1.57 (ocf:heartbeat:IPaddr2): Started controller-2
* stonith-fence_compute-fence-nova (stonith:fence_compute): Stopped
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
* Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-0
* stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-1
* stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1
* stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1
* Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0
Transition Summary:
- * Stop compute-0 ( controller-0 ) due to node availability
- * Start stonith-fence_compute-fence-nova ( database-0 )
- * Stop compute-unfence-trigger:0 ( compute-0 ) due to node availability
- * Move nova-evacuate ( database-0 -> database-1 )
- * Move stonith-fence_ipmilan-52540033df9c ( database-1 -> database-2 )
- * Move stonith-fence_ipmilan-5254001f5f3c ( database-2 -> messaging-0 )
- * Move stonith-fence_ipmilan-5254003f88b4 ( messaging-0 -> messaging-1 )
- * Move stonith-fence_ipmilan-5254007b7920 ( messaging-1 -> messaging-2 )
- * Move stonith-fence_ipmilan-525400ffc780 ( messaging-2 -> database-0 )
- * Move stonith-fence_ipmilan-5254009cb549 ( database-0 -> database-1 )
+ * Stop compute-0 ( controller-0 ) due to node availability
+ * Start stonith-fence_compute-fence-nova ( database-0 )
+ * Stop compute-unfence-trigger:0 ( compute-0 ) due to node availability
+ * Move nova-evacuate ( database-0 -> database-1 )
+ * Move stonith-fence_ipmilan-52540033df9c ( database-1 -> database-2 )
+ * Move stonith-fence_ipmilan-5254001f5f3c ( database-2 -> messaging-0 )
+ * Move stonith-fence_ipmilan-5254003f88b4 ( messaging-0 -> messaging-1 )
+ * Move stonith-fence_ipmilan-5254007b7920 ( messaging-1 -> messaging-2 )
+ * Move stonith-fence_ipmilan-525400ffc780 ( messaging-2 -> database-0 )
+ * Move stonith-fence_ipmilan-5254009cb549 ( database-0 -> database-1 )
Executing Cluster Transition:
- * Resource action: stonith-fence_compute-fence-nova start on database-0
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-2
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-2
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-0
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-2
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-1
- * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-0
- * Pseudo action: compute-unfence-trigger-clone_stop_0
- * Resource action: nova-evacuate stop on database-0
- * Resource action: stonith-fence_ipmilan-52540033df9c stop on database-1
- * Resource action: stonith-fence_ipmilan-5254001f5f3c stop on database-2
- * Resource action: stonith-fence_ipmilan-5254003f88b4 stop on messaging-0
- * Resource action: stonith-fence_ipmilan-5254007b7920 stop on messaging-1
- * Resource action: stonith-fence_ipmilan-525400ffc780 stop on messaging-2
- * Resource action: stonith-fence_ipmilan-5254009cb549 stop on database-0
- * Resource action: stonith-fence_compute-fence-nova monitor=60000 on database-0
- * Resource action: compute-unfence-trigger stop on compute-0
- * Pseudo action: compute-unfence-trigger-clone_stopped_0
- * Resource action: nova-evacuate start on database-1
- * Resource action: stonith-fence_ipmilan-52540033df9c start on database-2
- * Resource action: stonith-fence_ipmilan-5254001f5f3c start on messaging-0
- * Resource action: stonith-fence_ipmilan-5254003f88b4 start on messaging-1
- * Resource action: stonith-fence_ipmilan-5254007b7920 start on messaging-2
- * Resource action: stonith-fence_ipmilan-525400ffc780 start on database-0
- * Resource action: stonith-fence_ipmilan-5254009cb549 start on database-1
- * Resource action: compute-0 stop on controller-0
- * Resource action: nova-evacuate monitor=10000 on database-1
- * Resource action: stonith-fence_ipmilan-52540033df9c monitor=60000 on database-2
- * Resource action: stonith-fence_ipmilan-5254001f5f3c monitor=60000 on messaging-0
- * Resource action: stonith-fence_ipmilan-5254003f88b4 monitor=60000 on messaging-1
- * Resource action: stonith-fence_ipmilan-5254007b7920 monitor=60000 on messaging-2
- * Resource action: stonith-fence_ipmilan-525400ffc780 monitor=60000 on database-0
- * Resource action: stonith-fence_ipmilan-5254009cb549 monitor=60000 on database-1
+ * Resource action: stonith-fence_compute-fence-nova start on database-0
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-2
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-1
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-2
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-1
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-0
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-2
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-1
+ * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-0
+ * Pseudo action: compute-unfence-trigger-clone_stop_0
+ * Resource action: nova-evacuate stop on database-0
+ * Resource action: stonith-fence_ipmilan-52540033df9c stop on database-1
+ * Resource action: stonith-fence_ipmilan-5254001f5f3c stop on database-2
+ * Resource action: stonith-fence_ipmilan-5254003f88b4 stop on messaging-0
+ * Resource action: stonith-fence_ipmilan-5254007b7920 stop on messaging-1
+ * Resource action: stonith-fence_ipmilan-525400ffc780 stop on messaging-2
+ * Resource action: stonith-fence_ipmilan-5254009cb549 stop on database-0
+ * Resource action: stonith-fence_compute-fence-nova monitor=60000 on database-0
+ * Resource action: compute-unfence-trigger stop on compute-0
+ * Pseudo action: compute-unfence-trigger-clone_stopped_0
+ * Resource action: nova-evacuate start on database-1
+ * Resource action: stonith-fence_ipmilan-52540033df9c start on database-2
+ * Resource action: stonith-fence_ipmilan-5254001f5f3c start on messaging-0
+ * Resource action: stonith-fence_ipmilan-5254003f88b4 start on messaging-1
+ * Resource action: stonith-fence_ipmilan-5254007b7920 start on messaging-2
+ * Resource action: stonith-fence_ipmilan-525400ffc780 start on database-0
+ * Resource action: stonith-fence_ipmilan-5254009cb549 start on database-1
+ * Resource action: compute-0 stop on controller-0
+ * Resource action: nova-evacuate monitor=10000 on database-1
+ * Resource action: stonith-fence_ipmilan-52540033df9c monitor=60000 on database-2
+ * Resource action: stonith-fence_ipmilan-5254001f5f3c monitor=60000 on messaging-0
+ * Resource action: stonith-fence_ipmilan-5254003f88b4 monitor=60000 on messaging-1
+ * Resource action: stonith-fence_ipmilan-5254007b7920 monitor=60000 on messaging-2
+ * Resource action: stonith-fence_ipmilan-525400ffc780 monitor=60000 on database-0
+ * Resource action: stonith-fence_ipmilan-5254009cb549 monitor=60000 on database-1
Using the original execution date of: 2020-11-17 07:03:16Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* RemoteOnline: [ compute-1 ]
* RemoteOFFLINE: [ compute-0 ]
- * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-0@controller-2 ovn-dbs-bundle-1@controller-0 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-1@messaging-1 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Stopped
* compute-1 (ocf:pacemaker:remote): Started controller-1
* Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Promoted controller-2
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-0
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ip-172.17.1.57 (ocf:heartbeat:IPaddr2): Started controller-2
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-0
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-1 ]
* Stopped: [ compute-0 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-1
* stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-1
* stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1
* stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1
* Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0
diff --git a/cts/scheduler/summary/remote-fence-unclean-3.summary b/cts/scheduler/summary/remote-fence-unclean-3.summary
index 296ae937b7..af916ed3e5 100644
--- a/cts/scheduler/summary/remote-fence-unclean-3.summary
+++ b/cts/scheduler/summary/remote-fence-unclean-3.summary
@@ -1,103 +1,103 @@
Current cluster status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* RemoteOFFLINE: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@overcloud-controller-0 galera-bundle-1@overcloud-controller-1 galera-bundle-2@overcloud-controller-2 rabbitmq-bundle-0@overcloud-controller-0 rabbitmq-bundle-1@overcloud-controller-1 rabbitmq-bundle-2@overcloud-controller-2 redis-bundle-0@overcloud-controller-0 redis-bundle-1@overcloud-controller-1 redis-bundle-2@overcloud-controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* fence1 (stonith:fence_xvm): Stopped
* overcloud-novacompute-0 (ocf:pacemaker:remote): FAILED overcloud-controller-0
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted overcloud-controller-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted overcloud-controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted overcloud-controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted overcloud-controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted overcloud-controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted overcloud-controller-2
* ip-192.168.24.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* ip-10.0.0.7 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* ip-172.16.2.4 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2
* ip-172.16.2.8 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* ip-172.16.1.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* ip-172.16.3.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0
* Container bundle: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]:
* openstack-cinder-backup-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-1
Transition Summary:
* Fence (reboot) overcloud-novacompute-0 'the connection is unrecoverable'
* Start fence1 ( overcloud-controller-0 )
* Stop overcloud-novacompute-0 ( overcloud-controller-0 ) due to node availability
Executing Cluster Transition:
* Resource action: fence1 monitor on overcloud-controller-2
* Resource action: fence1 monitor on overcloud-controller-1
* Resource action: fence1 monitor on overcloud-controller-0
* Resource action: overcloud-novacompute-0 stop on overcloud-controller-0
* Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-2
* Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-1
* Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-2
* Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-0
* Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-1
* Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-0
* Resource action: galera-bundle-0 monitor on overcloud-controller-2
* Resource action: galera-bundle-0 monitor on overcloud-controller-1
* Resource action: galera-bundle-1 monitor on overcloud-controller-2
* Resource action: galera-bundle-1 monitor on overcloud-controller-0
* Resource action: galera-bundle-2 monitor on overcloud-controller-1
* Resource action: galera-bundle-2 monitor on overcloud-controller-0
* Resource action: redis-bundle-0 monitor on overcloud-controller-2
* Resource action: redis-bundle-0 monitor on overcloud-controller-1
* Resource action: redis-bundle-1 monitor on overcloud-controller-2
* Resource action: redis-bundle-1 monitor on overcloud-controller-0
* Resource action: redis-bundle-2 monitor on overcloud-controller-1
* Resource action: redis-bundle-2 monitor on overcloud-controller-0
* Fencing overcloud-novacompute-0 (reboot)
* Resource action: fence1 start on overcloud-controller-0
* Resource action: fence1 monitor=60000 on overcloud-controller-0
Revised Cluster Status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* RemoteOFFLINE: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@overcloud-controller-0 galera-bundle-1@overcloud-controller-1 galera-bundle-2@overcloud-controller-2 rabbitmq-bundle-0@overcloud-controller-0 rabbitmq-bundle-1@overcloud-controller-1 rabbitmq-bundle-2@overcloud-controller-2 redis-bundle-0@overcloud-controller-0 redis-bundle-1@overcloud-controller-1 redis-bundle-2@overcloud-controller-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* fence1 (stonith:fence_xvm): Started overcloud-controller-0
* overcloud-novacompute-0 (ocf:pacemaker:remote): Stopped
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-2
* Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted overcloud-controller-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted overcloud-controller-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted overcloud-controller-2
* Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted overcloud-controller-0
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted overcloud-controller-1
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted overcloud-controller-2
* ip-192.168.24.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* ip-10.0.0.7 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* ip-172.16.2.4 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2
* ip-172.16.2.8 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* ip-172.16.1.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* ip-172.16.3.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0
* Container bundle: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]:
* openstack-cinder-backup-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-1
diff --git a/cts/scheduler/summary/route-remote-notify.summary b/cts/scheduler/summary/route-remote-notify.summary
index 4f8969d8e6..fb55346f35 100644
--- a/cts/scheduler/summary/route-remote-notify.summary
+++ b/cts/scheduler/summary/route-remote-notify.summary
@@ -1,98 +1,98 @@
Using the original execution date of: 2018-10-31 11:51:32Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
- * GuestOnline: [ rabbitmq-bundle-0@controller-0 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 ]
+ * GuestOnline: [ rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-10.0.0.101 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.20 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.3.16 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.15 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( controller-0 ) due to node availability
* Stop rabbitmq-bundle-0 ( controller-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start
* Move ip-192.168.24.12 ( controller-0 -> controller-2 )
* Move ip-172.17.1.11 ( controller-0 -> controller-1 )
* Stop haproxy-bundle-docker-0 ( controller-0 ) due to node availability
* Move openstack-cinder-volume-docker-0 ( controller-0 -> controller-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Pseudo action: openstack-cinder-volume_stop_0
* Pseudo action: openstack-cinder-volume_start_0
* Pseudo action: haproxy-bundle_stop_0
* Pseudo action: rabbitmq-bundle_stop_0
* Resource action: rabbitmq notify on rabbitmq-bundle-0
* Resource action: rabbitmq notify on rabbitmq-bundle-1
* Resource action: rabbitmq notify on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Resource action: haproxy-bundle-docker-0 stop on controller-0
* Resource action: openstack-cinder-volume-docker-0 stop on controller-0
* Pseudo action: openstack-cinder-volume_stopped_0
* Pseudo action: haproxy-bundle_stopped_0
* Resource action: rabbitmq stop on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Resource action: rabbitmq-bundle-0 stop on controller-0
* Resource action: ip-192.168.24.12 stop on controller-0
* Resource action: ip-172.17.1.11 stop on controller-0
* Resource action: openstack-cinder-volume-docker-0 start on controller-2
* Pseudo action: openstack-cinder-volume_running_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Resource action: rabbitmq-bundle-docker-0 stop on controller-0
* Resource action: ip-192.168.24.12 start on controller-2
* Resource action: ip-172.17.1.11 start on controller-1
* Resource action: openstack-cinder-volume-docker-0 monitor=60000 on controller-2
* Cluster action: do_shutdown on controller-0
* Resource action: rabbitmq notify on rabbitmq-bundle-1
* Resource action: rabbitmq notify on rabbitmq-bundle-2
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: ip-192.168.24.12 monitor=10000 on controller-2
* Resource action: ip-172.17.1.11 monitor=10000 on controller-1
* Pseudo action: rabbitmq-bundle_stopped_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Pseudo action: rabbitmq-bundle-clone_running_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Pseudo action: rabbitmq-bundle_running_0
Using the original execution date of: 2018-10-31 11:51:32Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 ]
- * GuestOnline: [ rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 ]
+ * GuestOnline: [ rabbitmq-bundle-1 rabbitmq-bundle-2 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.101 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.20 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.16 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.15 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-2
diff --git a/cts/scheduler/summary/utilization-complex.summary b/cts/scheduler/summary/utilization-complex.summary
index 8b08c8c117..946dd12182 100644
--- a/cts/scheduler/summary/utilization-complex.summary
+++ b/cts/scheduler/summary/utilization-complex.summary
@@ -1,148 +1,148 @@
Using the original execution date of: 2022-01-05 22:04:47Z
Current cluster status:
* Node List:
* Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
- * GuestOnline: [ httpd-bundle-0@rhel8-2 ]
+ * GuestOnline: [ httpd-bundle-0 ]
* Full List of Resources:
* dummy3 (ocf:pacemaker:Dummy): Started rhel8-1
* dummy5 (ocf:pacemaker:Dummy): Started rhel8-2
* Container bundle set: httpd-bundle [localhost/pcmktest:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-2
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* dummy4 (ocf:pacemaker:Dummy): Started rhel8-5
* dummy1 (ocf:pacemaker:Dummy): Started rhel8-1
* dummy2 (ocf:pacemaker:Dummy): Started rhel8-1
* Fencing (stonith:fence_xvm): Started rhel8-3
* FencingPass (stonith:fence_dummy): Started rhel8-4
* FencingFail (stonith:fence_dummy): Started rhel8-5
* Resource Group: g1:
* g1m1 (ocf:pacemaker:Dummy): Started rhel8-5
* g1m2 (ocf:pacemaker:Dummy): Started rhel8-5
* g1m3 (ocf:pacemaker:Dummy): Started rhel8-5
* Clone Set: clone1-clone [clone1]:
* Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
* Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
* Clone Set: clone2-clone [clone2]:
* Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
* Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
Transition Summary:
* Stop dummy3 ( rhel8-1 ) due to node availability
* Move dummy5 ( rhel8-2 -> rhel8-5 )
* Move httpd-bundle-ip-192.168.122.131 ( rhel8-2 -> rhel8-5 )
* Move httpd-bundle-podman-0 ( rhel8-2 -> rhel8-5 )
* Move httpd-bundle-0 ( rhel8-2 -> rhel8-5 )
* Restart httpd:0 ( httpd-bundle-0 ) due to required httpd-bundle-podman-0 start
* Start httpd-bundle-1 ( rhel8-1 ) due to unrunnable httpd-bundle-podman-1 start (blocked)
* Start httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-podman-1 start (blocked)
* Start httpd-bundle-2 ( rhel8-2 ) due to unrunnable httpd-bundle-podman-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-podman-2 start (blocked)
* Move dummy4 ( rhel8-5 -> rhel8-4 )
* Move dummy1 ( rhel8-1 -> rhel8-3 )
* Move dummy2 ( rhel8-1 -> rhel8-3 )
* Move Fencing ( rhel8-3 -> rhel8-1 )
* Move FencingFail ( rhel8-5 -> rhel8-2 )
* Move g1m1 ( rhel8-5 -> rhel8-4 )
* Move g1m2 ( rhel8-5 -> rhel8-4 )
* Move g1m3 ( rhel8-5 -> rhel8-4 )
* Stop clone1:3 ( rhel8-5 ) due to node availability
* Stop clone2:3 ( rhel8-5 ) due to node availability
Executing Cluster Transition:
* Resource action: dummy3 stop on rhel8-1
* Resource action: dummy5 stop on rhel8-2
* Resource action: dummy4 stop on rhel8-5
* Resource action: dummy1 stop on rhel8-1
* Resource action: dummy2 stop on rhel8-1
* Resource action: Fencing stop on rhel8-3
* Resource action: FencingFail stop on rhel8-5
* Pseudo action: g1_stop_0
* Resource action: g1m3 stop on rhel8-5
* Pseudo action: clone1-clone_stop_0
* Pseudo action: clone2-clone_stop_0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Pseudo action: load_stopped_rhel8-4
* Pseudo action: load_stopped_rhel8-3
* Pseudo action: load_stopped_httpd-bundle-2
* Pseudo action: load_stopped_httpd-bundle-1
* Pseudo action: load_stopped_httpd-bundle-0
* Pseudo action: load_stopped_rhel8-1
* Pseudo action: httpd-bundle-clone_stop_0
* Resource action: dummy4 start on rhel8-4
* Resource action: dummy1 start on rhel8-3
* Resource action: dummy2 start on rhel8-3
* Resource action: Fencing start on rhel8-1
* Resource action: FencingFail start on rhel8-2
* Resource action: g1m2 stop on rhel8-5
* Resource action: clone1 stop on rhel8-5
* Pseudo action: clone1-clone_stopped_0
* Resource action: clone2 stop on rhel8-5
* Pseudo action: clone2-clone_stopped_0
* Resource action: httpd stop on httpd-bundle-0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Resource action: httpd-bundle-0 stop on rhel8-2
* Resource action: dummy4 monitor=10000 on rhel8-4
* Resource action: dummy1 monitor=10000 on rhel8-3
* Resource action: dummy2 monitor=10000 on rhel8-3
* Resource action: Fencing monitor=120000 on rhel8-1
* Resource action: g1m1 stop on rhel8-5
* Pseudo action: load_stopped_rhel8-5
* Resource action: dummy5 start on rhel8-5
* Resource action: httpd-bundle-podman-0 stop on rhel8-2
* Pseudo action: g1_stopped_0
* Pseudo action: g1_start_0
* Resource action: g1m1 start on rhel8-4
* Resource action: g1m2 start on rhel8-4
* Resource action: g1m3 start on rhel8-4
* Pseudo action: httpd-bundle_stopped_0
* Pseudo action: load_stopped_rhel8-2
* Resource action: dummy5 monitor=10000 on rhel8-5
* Resource action: httpd-bundle-ip-192.168.122.131 stop on rhel8-2
* Pseudo action: g1_running_0
* Resource action: g1m1 monitor=10000 on rhel8-4
* Resource action: g1m2 monitor=10000 on rhel8-4
* Resource action: g1m3 monitor=10000 on rhel8-4
* Resource action: httpd-bundle-ip-192.168.122.131 start on rhel8-5
* Resource action: httpd-bundle-podman-0 start on rhel8-5
* Resource action: httpd-bundle-0 start on rhel8-5
* Resource action: httpd start on httpd-bundle-0
* Resource action: httpd monitor=15000 on httpd-bundle-0
* Pseudo action: httpd-bundle-clone_running_0
* Resource action: httpd-bundle-ip-192.168.122.131 monitor=60000 on rhel8-5
* Resource action: httpd-bundle-podman-0 monitor=60000 on rhel8-5
* Resource action: httpd-bundle-0 monitor=30000 on rhel8-5
* Pseudo action: httpd-bundle_running_0
Using the original execution date of: 2022-01-05 22:04:47Z
Revised Cluster Status:
* Node List:
* Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
- * GuestOnline: [ httpd-bundle-0@rhel8-5 ]
+ * GuestOnline: [ httpd-bundle-0 ]
* Full List of Resources:
* dummy3 (ocf:pacemaker:Dummy): Stopped
* dummy5 (ocf:pacemaker:Dummy): Started rhel8-5
* Container bundle set: httpd-bundle [localhost/pcmktest:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-5
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* dummy4 (ocf:pacemaker:Dummy): Started rhel8-4
* dummy1 (ocf:pacemaker:Dummy): Started rhel8-3
* dummy2 (ocf:pacemaker:Dummy): Started rhel8-3
* Fencing (stonith:fence_xvm): Started rhel8-1
* FencingPass (stonith:fence_dummy): Started rhel8-4
* FencingFail (stonith:fence_dummy): Started rhel8-2
* Resource Group: g1:
* g1m1 (ocf:pacemaker:Dummy): Started rhel8-4
* g1m2 (ocf:pacemaker:Dummy): Started rhel8-4
* g1m3 (ocf:pacemaker:Dummy): Started rhel8-4
* Clone Set: clone1-clone [clone1]:
* Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ]
* Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 rhel8-5 ]
* Clone Set: clone2-clone [clone2]:
* Started: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 ]
* Stopped: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 rhel8-5 ]
diff --git a/cts/scheduler/summary/whitebox-asymmetric.summary b/cts/scheduler/summary/whitebox-asymmetric.summary
index 2eb429c6ba..53911391c2 100644
--- a/cts/scheduler/summary/whitebox-asymmetric.summary
+++ b/cts/scheduler/summary/whitebox-asymmetric.summary
@@ -1,42 +1,42 @@
1 of 7 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ 18builder ]
* Full List of Resources:
* fence_false (stonith:fence_false): Stopped
* container2 (ocf:pacemaker:Dummy): Started 18builder
* webserver (ocf:pacemaker:Dummy): Stopped
* nfs_mount (ocf:pacemaker:Dummy): Stopped
* Resource Group: mygroup:
* vg_tags (ocf:heartbeat:LVM): Stopped (disabled)
* vg_tags_dup (ocf:heartbeat:LVM): Stopped
Transition Summary:
* Start nfs_mount ( 18node2 )
* Start 18node2 ( 18builder )
Executing Cluster Transition:
* Resource action: 18node2 start on 18builder
* Resource action: webserver monitor on 18node2
* Resource action: nfs_mount monitor on 18node2
* Resource action: vg_tags monitor on 18node2
* Resource action: vg_tags_dup monitor on 18node2
* Resource action: 18node2 monitor=30000 on 18builder
* Resource action: nfs_mount start on 18node2
* Resource action: nfs_mount monitor=10000 on 18node2
Revised Cluster Status:
* Node List:
* Online: [ 18builder ]
- * GuestOnline: [ 18node2@18builder ]
+ * GuestOnline: [ 18node2 ]
* Full List of Resources:
* fence_false (stonith:fence_false): Stopped
* container2 (ocf:pacemaker:Dummy): Started 18builder
* webserver (ocf:pacemaker:Dummy): Stopped
* nfs_mount (ocf:pacemaker:Dummy): Started 18node2
* Resource Group: mygroup:
* vg_tags (ocf:heartbeat:LVM): Stopped (disabled)
* vg_tags_dup (ocf:heartbeat:LVM): Stopped
diff --git a/cts/scheduler/summary/whitebox-fail1.summary b/cts/scheduler/summary/whitebox-fail1.summary
index 7470c923a7..974f124093 100644
--- a/cts/scheduler/summary/whitebox-fail1.summary
+++ b/cts/scheduler/summary/whitebox-fail1.summary
@@ -1,59 +1,59 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2@18node2 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): FAILED lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
Transition Summary:
* Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
* Recover container1 ( 18node2 )
* Recover M:4 ( lxc1 )
* Recover B ( lxc1 )
* Restart lxc1 ( 18node2 ) due to required container1 start
Executing Cluster Transition:
* Resource action: A monitor on lxc2
* Resource action: B monitor on lxc2
* Resource action: D monitor on lxc2
* Resource action: lxc1 stop on 18node2
* Resource action: container1 stop on 18node2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Resource action: container1 start on 18node2
* Pseudo action: M-clone_stop_0
* Pseudo action: B_stop_0
* Resource action: lxc1 start on 18node2
* Resource action: lxc1 monitor=30000 on 18node2
* Pseudo action: M_stop_0
* Pseudo action: M-clone_stopped_0
* Pseudo action: M-clone_start_0
* Resource action: B start on lxc1
* Resource action: M start on lxc1
* Pseudo action: M-clone_running_0
* Resource action: B monitor=10000 on lxc1
* Resource action: M monitor=10000 on lxc1
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node2 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node2
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-fail2.summary b/cts/scheduler/summary/whitebox-fail2.summary
index 4218337f45..73b44f536d 100644
--- a/cts/scheduler/summary/whitebox-fail2.summary
+++ b/cts/scheduler/summary/whitebox-fail2.summary
@@ -1,59 +1,59 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2@18node2 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): FAILED 18node2
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): FAILED lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
Transition Summary:
* Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
* Recover container1 ( 18node2 )
* Recover M:4 ( lxc1 )
* Recover B ( lxc1 )
* Recover lxc1 ( 18node2 )
Executing Cluster Transition:
* Resource action: A monitor on lxc2
* Resource action: B monitor on lxc2
* Resource action: D monitor on lxc2
* Resource action: lxc1 stop on 18node2
* Resource action: container1 stop on 18node2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Resource action: container1 start on 18node2
* Pseudo action: M-clone_stop_0
* Pseudo action: B_stop_0
* Resource action: lxc1 start on 18node2
* Resource action: lxc1 monitor=30000 on 18node2
* Pseudo action: M_stop_0
* Pseudo action: M-clone_stopped_0
* Pseudo action: M-clone_start_0
* Resource action: B start on lxc1
* Resource action: M start on lxc1
* Pseudo action: M-clone_running_0
* Resource action: B monitor=10000 on lxc1
* Resource action: M monitor=10000 on lxc1
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node2 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node2
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-fail3.summary b/cts/scheduler/summary/whitebox-fail3.summary
index 6b38fc2e22..b7de4a7eac 100644
--- a/cts/scheduler/summary/whitebox-fail3.summary
+++ b/cts/scheduler/summary/whitebox-fail3.summary
@@ -1,55 +1,55 @@
Current cluster status:
* Node List:
* Online: [ dvossel-laptop2 ]
* Full List of Resources:
* vm (ocf:heartbeat:VirtualDomain): Stopped
* vm2 (ocf:heartbeat:VirtualDomain): Stopped
* FAKE (ocf:pacemaker:Dummy): Started dvossel-laptop2
* Clone Set: W-master [W] (promotable):
* Promoted: [ dvossel-laptop2 ]
* Stopped: [ 18builder 18node1 ]
* Clone Set: X-master [X] (promotable):
* Promoted: [ dvossel-laptop2 ]
* Stopped: [ 18builder 18node1 ]
Transition Summary:
* Start vm ( dvossel-laptop2 )
* Move FAKE ( dvossel-laptop2 -> 18builder )
* Start W:1 ( 18builder )
* Start X:1 ( 18builder )
* Start 18builder ( dvossel-laptop2 )
Executing Cluster Transition:
* Resource action: vm start on dvossel-laptop2
* Pseudo action: W-master_start_0
* Pseudo action: X-master_start_0
* Resource action: 18builder monitor on dvossel-laptop2
* Resource action: 18builder start on dvossel-laptop2
* Resource action: FAKE stop on dvossel-laptop2
* Resource action: W start on 18builder
* Pseudo action: W-master_running_0
* Resource action: X start on 18builder
* Pseudo action: X-master_running_0
* Resource action: 18builder monitor=30000 on dvossel-laptop2
* Resource action: FAKE start on 18builder
* Resource action: W monitor=10000 on 18builder
* Resource action: X monitor=10000 on 18builder
Revised Cluster Status:
* Node List:
* Online: [ dvossel-laptop2 ]
- * GuestOnline: [ 18builder@dvossel-laptop2 ]
+ * GuestOnline: [ 18builder ]
* Full List of Resources:
* vm (ocf:heartbeat:VirtualDomain): Started dvossel-laptop2
* vm2 (ocf:heartbeat:VirtualDomain): Stopped
* FAKE (ocf:pacemaker:Dummy): Started 18builder
* Clone Set: W-master [W] (promotable):
* Promoted: [ dvossel-laptop2 ]
* Unpromoted: [ 18builder ]
* Stopped: [ 18node1 ]
* Clone Set: X-master [X] (promotable):
* Promoted: [ dvossel-laptop2 ]
* Unpromoted: [ 18builder ]
* Stopped: [ 18node1 ]
diff --git a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
index a27da5d542..78506c5354 100644
--- a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
+++ b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
@@ -1,104 +1,104 @@
Current cluster status:
* Node List:
* Node kiff-01: UNCLEAN (offline)
* Online: [ kiff-02 ]
- * GuestOnline: [ lxc-01_kiff-02@kiff-02 lxc-02_kiff-02@kiff-02 ]
+ * GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ]
* Full List of Resources:
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
* Clone Set: dlm-clone [dlm]:
* dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
* Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean'
* Fence (reboot) kiff-01 'peer is no longer part of the cluster'
* Move fence-kiff-02 ( kiff-01 -> kiff-02 )
* Stop dlm:0 ( kiff-01 ) due to node availability
* Stop clvmd:0 ( kiff-01 ) due to node availability
* Stop shared0:0 ( kiff-01 ) due to node availability
* Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
* Recover vm-fs ( lxc-01_kiff-01 )
* Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
Executing Cluster Transition:
* Pseudo action: fence-kiff-02_stop_0
* Resource action: dlm monitor on lxc-02_kiff-02
* Resource action: dlm monitor on lxc-01_kiff-02
* Resource action: clvmd monitor on lxc-02_kiff-02
* Resource action: clvmd monitor on lxc-01_kiff-02
* Resource action: shared0 monitor on lxc-02_kiff-02
* Resource action: shared0 monitor on lxc-01_kiff-02
* Resource action: vm-fs monitor on lxc-02_kiff-02
* Resource action: vm-fs monitor on lxc-01_kiff-02
* Pseudo action: lxc-01_kiff-01_stop_0
* Pseudo action: lxc-02_kiff-01_stop_0
* Fencing kiff-01 (reboot)
* Pseudo action: R-lxc-01_kiff-01_stop_0
* Pseudo action: R-lxc-02_kiff-01_stop_0
* Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
* Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
* Resource action: fence-kiff-02 start on kiff-02
* Pseudo action: shared0-clone_stop_0
* Resource action: R-lxc-01_kiff-01 start on kiff-02
* Resource action: R-lxc-02_kiff-01 start on kiff-02
* Pseudo action: vm-fs_stop_0
* Resource action: lxc-01_kiff-01 start on kiff-02
* Resource action: lxc-02_kiff-01 start on kiff-02
* Resource action: fence-kiff-02 monitor=60000 on kiff-02
* Pseudo action: shared0_stop_0
* Pseudo action: shared0-clone_stopped_0
* Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
* Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
* Resource action: vm-fs start on lxc-01_kiff-01
* Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
* Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
* Pseudo action: clvmd-clone_stop_0
* Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
* Pseudo action: clvmd_stop_0
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Pseudo action: dlm_stop_0
* Pseudo action: dlm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ kiff-02 ]
* OFFLINE: [ kiff-01 ]
- * GuestOnline: [ lxc-01_kiff-01@kiff-02 lxc-01_kiff-02@kiff-02 lxc-02_kiff-01@kiff-02 lxc-02_kiff-02@kiff-02 ]
+ * GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Full List of Resources:
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
* Clone Set: dlm-clone [dlm]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01
diff --git a/cts/scheduler/summary/whitebox-migrate1.summary b/cts/scheduler/summary/whitebox-migrate1.summary
index 6f18a504a5..f86454827c 100644
--- a/cts/scheduler/summary/whitebox-migrate1.summary
+++ b/cts/scheduler/summary/whitebox-migrate1.summary
@@ -1,56 +1,56 @@
Current cluster status:
* Node List:
* Online: [ rhel7-node2 rhel7-node3 ]
- * GuestOnline: [ rhel7-node1@rhel7-node2 ]
+ * GuestOnline: [ rhel7-node1 ]
* Full List of Resources:
* shooter1 (stonith:fence_xvm): Started rhel7-node3
* FAKE1 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE2 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE3 (ocf:heartbeat:Dummy): Started rhel7-node3
* FAKE4 (ocf:heartbeat:Dummy): Started rhel7-node3
* FAKE5 (ocf:heartbeat:Dummy): Started rhel7-node2
* FAKE6 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE7 (ocf:heartbeat:Dummy): Started rhel7-node3
* remote-rsc (ocf:heartbeat:Dummy): Started rhel7-node2
Transition Summary:
* Move shooter1 ( rhel7-node3 -> rhel7-node2 )
* Move FAKE3 ( rhel7-node3 -> rhel7-node2 )
* Migrate remote-rsc ( rhel7-node2 -> rhel7-node3 )
* Migrate rhel7-node1 ( rhel7-node2 -> rhel7-node3 )
Executing Cluster Transition:
* Resource action: shooter1 stop on rhel7-node3
* Resource action: FAKE3 stop on rhel7-node3
* Resource action: rhel7-node1 monitor on rhel7-node3
* Resource action: shooter1 start on rhel7-node2
* Resource action: FAKE3 start on rhel7-node2
* Resource action: remote-rsc migrate_to on rhel7-node2
* Resource action: shooter1 monitor=60000 on rhel7-node2
* Resource action: FAKE3 monitor=10000 on rhel7-node2
* Resource action: remote-rsc migrate_from on rhel7-node3
* Resource action: rhel7-node1 migrate_to on rhel7-node2
* Resource action: rhel7-node1 migrate_from on rhel7-node3
* Resource action: rhel7-node1 stop on rhel7-node2
* Resource action: remote-rsc stop on rhel7-node2
* Pseudo action: remote-rsc_start_0
* Pseudo action: rhel7-node1_start_0
* Resource action: remote-rsc monitor=10000 on rhel7-node3
* Resource action: rhel7-node1 monitor=30000 on rhel7-node3
Revised Cluster Status:
* Node List:
* Online: [ rhel7-node2 rhel7-node3 ]
- * GuestOnline: [ rhel7-node1@rhel7-node3 ]
+ * GuestOnline: [ rhel7-node1 ]
* Full List of Resources:
* shooter1 (stonith:fence_xvm): Started rhel7-node2
* FAKE1 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE2 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE3 (ocf:heartbeat:Dummy): Started rhel7-node2
* FAKE4 (ocf:heartbeat:Dummy): Started rhel7-node3
* FAKE5 (ocf:heartbeat:Dummy): Started rhel7-node2
* FAKE6 (ocf:heartbeat:Dummy): Started rhel7-node1
* FAKE7 (ocf:heartbeat:Dummy): Started rhel7-node3
* remote-rsc (ocf:heartbeat:Dummy): Started rhel7-node3
diff --git a/cts/scheduler/summary/whitebox-move.summary b/cts/scheduler/summary/whitebox-move.summary
index 6742f114d0..88846e2c61 100644
--- a/cts/scheduler/summary/whitebox-move.summary
+++ b/cts/scheduler/summary/whitebox-move.summary
@@ -1,49 +1,49 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node1 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node1
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started lxc1
Transition Summary:
* Move container1 ( 18node1 -> 18node2 )
* Restart M:3 ( lxc1 ) due to required container1 start
* Restart A ( lxc1 ) due to required container1 start
* Move lxc1 ( 18node1 -> 18node2 )
Executing Cluster Transition:
* Pseudo action: M-clone_stop_0
* Resource action: A stop on lxc1
* Resource action: A monitor on lxc2
* Resource action: M stop on lxc1
* Pseudo action: M-clone_stopped_0
* Pseudo action: M-clone_start_0
* Resource action: lxc1 stop on 18node1
* Resource action: container1 stop on 18node1
* Resource action: container1 start on 18node2
* Resource action: lxc1 start on 18node2
* Resource action: M start on lxc1
* Resource action: M monitor=10000 on lxc1
* Pseudo action: M-clone_running_0
* Resource action: A start on lxc1
* Resource action: A monitor=10000 on lxc1
* Resource action: lxc1 monitor=30000 on 18node2
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node2 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node2
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started lxc1
diff --git a/cts/scheduler/summary/whitebox-ms-ordering-move.summary b/cts/scheduler/summary/whitebox-ms-ordering-move.summary
index 6a5fb6eaeb..00076986cc 100644
--- a/cts/scheduler/summary/whitebox-ms-ordering-move.summary
+++ b/cts/scheduler/summary/whitebox-ms-ordering-move.summary
@@ -1,107 +1,107 @@
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ lxc1@rhel7-1 lxc2@rhel7-1 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-3
* FencingPass (stonith:fence_dummy): Started rhel7-4
* FencingFail (stonith:fence_dummy): Started rhel7-5
* rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1
* rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2
* rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-3
* rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4
* rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5
* migrator (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: Connectivity [ping-1]:
* Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* Stopped: [ lxc1 lxc2 ]
* Clone Set: master-1 [stateful-1] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
* Resource Group: group-1:
* r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-3
* petulant (service:DummySD): Started rhel7-3
* r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-3
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc1 ]
* Unpromoted: [ lxc2 ]
Transition Summary:
* Move container1 ( rhel7-1 -> rhel7-2 )
* Restart lxc-ms:0 ( Promoted lxc1 ) due to required container1 start
* Move lxc1 ( rhel7-1 -> rhel7-2 )
Executing Cluster Transition:
* Resource action: rsc_rhel7-1 monitor on lxc2
* Resource action: rsc_rhel7-2 monitor on lxc2
* Resource action: rsc_rhel7-3 monitor on lxc2
* Resource action: rsc_rhel7-4 monitor on lxc2
* Resource action: rsc_rhel7-5 monitor on lxc2
* Resource action: migrator monitor on lxc2
* Resource action: ping-1 monitor on lxc2
* Resource action: stateful-1 monitor on lxc2
* Resource action: r192.168.122.207 monitor on lxc2
* Resource action: petulant monitor on lxc2
* Resource action: r192.168.122.208 monitor on lxc2
* Resource action: lsb-dummy monitor on lxc2
* Pseudo action: lxc-ms-master_demote_0
* Resource action: lxc1 monitor on rhel7-5
* Resource action: lxc1 monitor on rhel7-4
* Resource action: lxc1 monitor on rhel7-3
* Resource action: lxc1 monitor on rhel7-2
* Resource action: lxc2 monitor on rhel7-5
* Resource action: lxc2 monitor on rhel7-4
* Resource action: lxc2 monitor on rhel7-3
* Resource action: lxc2 monitor on rhel7-2
* Resource action: lxc-ms demote on lxc1
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc-ms stop on lxc1
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc1 stop on rhel7-1
* Resource action: container1 stop on rhel7-1
* Resource action: container1 start on rhel7-2
* Resource action: lxc1 start on rhel7-2
* Resource action: lxc-ms start on lxc1
* Pseudo action: lxc-ms-master_running_0
* Resource action: lxc1 monitor=30000 on rhel7-2
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- * GuestOnline: [ lxc1@rhel7-2 lxc2@rhel7-1 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-3
* FencingPass (stonith:fence_dummy): Started rhel7-4
* FencingFail (stonith:fence_dummy): Started rhel7-5
* rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1
* rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2
* rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-3
* rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4
* rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5
* migrator (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: Connectivity [ping-1]:
* Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* Stopped: [ lxc1 lxc2 ]
* Clone Set: master-1 [stateful-1] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
* Resource Group: group-1:
* r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-3
* petulant (service:DummySD): Started rhel7-3
* r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-3
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3
* container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2
* container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc1 ]
* Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/whitebox-ms-ordering.summary b/cts/scheduler/summary/whitebox-ms-ordering.summary
index 066763f31d..06ac35671b 100644
--- a/cts/scheduler/summary/whitebox-ms-ordering.summary
+++ b/cts/scheduler/summary/whitebox-ms-ordering.summary
@@ -1,73 +1,73 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started 18node2
* container1 (ocf:heartbeat:VirtualDomain): FAILED
* container2 (ocf:heartbeat:VirtualDomain): FAILED
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Stopped: [ 18node1 18node2 18node3 ]
Transition Summary:
* Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
* Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
* Start container1 ( 18node1 )
* Start container2 ( 18node1 )
* Recover lxc-ms:0 ( Promoted lxc1 )
* Recover lxc-ms:1 ( Unpromoted lxc2 )
* Start lxc1 ( 18node1 )
* Start lxc2 ( 18node1 )
Executing Cluster Transition:
* Resource action: container1 monitor on 18node3
* Resource action: container1 monitor on 18node2
* Resource action: container1 monitor on 18node1
* Resource action: container2 monitor on 18node3
* Resource action: container2 monitor on 18node2
* Resource action: container2 monitor on 18node1
* Resource action: lxc-ms monitor on 18node3
* Resource action: lxc-ms monitor on 18node2
* Resource action: lxc-ms monitor on 18node1
* Pseudo action: lxc-ms-master_demote_0
* Resource action: lxc1 monitor on 18node3
* Resource action: lxc1 monitor on 18node2
* Resource action: lxc1 monitor on 18node1
* Resource action: lxc2 monitor on 18node3
* Resource action: lxc2 monitor on 18node2
* Resource action: lxc2 monitor on 18node1
* Pseudo action: stonith-lxc2-reboot on lxc2
* Pseudo action: stonith-lxc1-reboot on lxc1
* Resource action: container1 start on 18node1
* Resource action: container2 start on 18node1
* Pseudo action: lxc-ms_demote_0
* Pseudo action: lxc-ms-master_demoted_0
* Pseudo action: lxc-ms-master_stop_0
* Resource action: lxc1 start on 18node1
* Resource action: lxc2 start on 18node1
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms_stop_0
* Pseudo action: lxc-ms-master_stopped_0
* Pseudo action: lxc-ms-master_start_0
* Resource action: lxc1 monitor=30000 on 18node1
* Resource action: lxc2 monitor=30000 on 18node1
* Resource action: lxc-ms start on lxc1
* Resource action: lxc-ms start on lxc2
* Pseudo action: lxc-ms-master_running_0
* Resource action: lxc-ms monitor=10000 on lxc2
* Pseudo action: lxc-ms-master_promote_0
* Resource action: lxc-ms promote on lxc1
* Pseudo action: lxc-ms-master_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node1 lxc2@18node1 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started 18node2
* container1 (ocf:heartbeat:VirtualDomain): Started 18node1
* container2 (ocf:heartbeat:VirtualDomain): Started 18node1
* Clone Set: lxc-ms-master [lxc-ms] (promotable):
* Promoted: [ lxc1 ]
* Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/whitebox-nested-group.summary b/cts/scheduler/summary/whitebox-nested-group.summary
index 9a95e2056f..d97c079400 100644
--- a/cts/scheduler/summary/whitebox-nested-group.summary
+++ b/cts/scheduler/summary/whitebox-nested-group.summary
@@ -1,102 +1,102 @@
Current cluster status:
* Node List:
* Online: [ c7auto1 c7auto2 c7auto3 ]
* Full List of Resources:
* shooter (stonith:fence_phd_kvm): Started c7auto2
* fake1 (ocf:heartbeat:Dummy): Stopped
* fake2 (ocf:heartbeat:Dummy): Stopped
* fake3 (ocf:heartbeat:Dummy): Stopped
* fake4 (ocf:heartbeat:Dummy): Stopped
* fake5 (ocf:heartbeat:Dummy): Stopped
* Clone Set: fake_clone [fake]:
* Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
* Resource Group: fake_group:
* fake_fs (ocf:heartbeat:Dummy): Stopped
* container (ocf:heartbeat:Dummy): Stopped
Transition Summary:
* Start fake1 ( c7auto3 )
* Start fake2 ( c7auto4 )
* Start fake3 ( c7auto2 )
* Start fake4 ( c7auto3 )
* Start fake5 ( c7auto4 )
* Start fake:0 ( c7auto2 )
* Start fake:1 ( c7auto3 )
* Start fake:2 ( c7auto4 )
* Start fake:3 ( c7auto1 )
* Start fake_fs ( c7auto1 )
* Start container ( c7auto1 )
* Start c7auto4 ( c7auto1 )
Executing Cluster Transition:
* Resource action: fake1 monitor on c7auto3
* Resource action: fake1 monitor on c7auto2
* Resource action: fake1 monitor on c7auto1
* Resource action: fake2 monitor on c7auto3
* Resource action: fake2 monitor on c7auto2
* Resource action: fake2 monitor on c7auto1
* Resource action: fake3 monitor on c7auto3
* Resource action: fake3 monitor on c7auto2
* Resource action: fake3 monitor on c7auto1
* Resource action: fake4 monitor on c7auto3
* Resource action: fake4 monitor on c7auto2
* Resource action: fake4 monitor on c7auto1
* Resource action: fake5 monitor on c7auto3
* Resource action: fake5 monitor on c7auto2
* Resource action: fake5 monitor on c7auto1
* Resource action: fake:0 monitor on c7auto2
* Resource action: fake:1 monitor on c7auto3
* Resource action: fake:3 monitor on c7auto1
* Pseudo action: fake_clone_start_0
* Pseudo action: fake_group_start_0
* Resource action: fake_fs monitor on c7auto3
* Resource action: fake_fs monitor on c7auto2
* Resource action: fake_fs monitor on c7auto1
* Resource action: c7auto4 monitor on c7auto3
* Resource action: c7auto4 monitor on c7auto2
* Resource action: c7auto4 monitor on c7auto1
* Resource action: fake1 start on c7auto3
* Resource action: fake3 start on c7auto2
* Resource action: fake4 start on c7auto3
* Resource action: fake:0 start on c7auto2
* Resource action: fake:1 start on c7auto3
* Resource action: fake:3 start on c7auto1
* Resource action: fake_fs start on c7auto1
* Resource action: container start on c7auto1
* Resource action: c7auto4 start on c7auto1
* Resource action: fake1 monitor=10000 on c7auto3
* Resource action: fake2 start on c7auto4
* Resource action: fake3 monitor=10000 on c7auto2
* Resource action: fake4 monitor=10000 on c7auto3
* Resource action: fake5 start on c7auto4
* Resource action: fake:0 monitor=10000 on c7auto2
* Resource action: fake:1 monitor=10000 on c7auto3
* Resource action: fake:2 start on c7auto4
* Resource action: fake:3 monitor=10000 on c7auto1
* Pseudo action: fake_clone_running_0
* Pseudo action: fake_group_running_0
* Resource action: fake_fs monitor=10000 on c7auto1
* Resource action: container monitor=10000 on c7auto1
* Resource action: c7auto4 monitor=30000 on c7auto1
* Resource action: fake2 monitor=10000 on c7auto4
* Resource action: fake5 monitor=10000 on c7auto4
* Resource action: fake:2 monitor=10000 on c7auto4
Revised Cluster Status:
* Node List:
* Online: [ c7auto1 c7auto2 c7auto3 ]
- * GuestOnline: [ c7auto4@c7auto1 ]
+ * GuestOnline: [ c7auto4 ]
* Full List of Resources:
* shooter (stonith:fence_phd_kvm): Started c7auto2
* fake1 (ocf:heartbeat:Dummy): Started c7auto3
* fake2 (ocf:heartbeat:Dummy): Started c7auto4
* fake3 (ocf:heartbeat:Dummy): Started c7auto2
* fake4 (ocf:heartbeat:Dummy): Started c7auto3
* fake5 (ocf:heartbeat:Dummy): Started c7auto4
* Clone Set: fake_clone [fake]:
* Started: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
* Resource Group: fake_group:
* fake_fs (ocf:heartbeat:Dummy): Started c7auto1
* container (ocf:heartbeat:Dummy): Started c7auto1
diff --git a/cts/scheduler/summary/whitebox-orphan-ms.summary b/cts/scheduler/summary/whitebox-orphan-ms.summary
index 0d0007dcc6..e7df2d81bf 100644
--- a/cts/scheduler/summary/whitebox-orphan-ms.summary
+++ b/cts/scheduler/summary/whitebox-orphan-ms.summary
@@ -1,87 +1,87 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node1 lxc2@18node1 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started 18node2
* FencingPass (stonith:fence_dummy): Started 18node3
* FencingFail (stonith:fence_dummy): Started 18node3
* rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
* rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node2
* rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
* migrator (ocf:pacemaker:Dummy): Started 18node1
* Clone Set: Connectivity [ping-1]:
* Started: [ 18node1 18node2 18node3 ]
* Clone Set: master-1 [stateful-1] (promotable):
* Promoted: [ 18node1 ]
* Unpromoted: [ 18node2 18node3 ]
* Resource Group: group-1:
* r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
* r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
* r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
* container2 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1
* lxc1 (ocf:pacemaker:remote): ORPHANED Started 18node1
* lxc-ms (ocf:pacemaker:Stateful): ORPHANED Promoted [ lxc1 lxc2 ]
* lxc2 (ocf:pacemaker:remote): ORPHANED Started 18node1
* container1 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1
Transition Summary:
* Move FencingFail ( 18node3 -> 18node1 )
* Stop container2 ( 18node1 ) due to node availability
* Stop lxc1 ( 18node1 ) due to node availability
* Stop lxc-ms ( Promoted lxc1 ) due to node availability
* Stop lxc-ms ( Promoted lxc2 ) due to node availability
* Stop lxc2 ( 18node1 ) due to node availability
* Stop container1 ( 18node1 ) due to node availability
Executing Cluster Transition:
* Resource action: FencingFail stop on 18node3
* Resource action: lxc-ms demote on lxc2
* Resource action: lxc-ms demote on lxc1
* Resource action: FencingFail start on 18node1
* Resource action: lxc-ms stop on lxc2
* Resource action: lxc-ms stop on lxc1
* Resource action: lxc-ms delete on 18node3
* Resource action: lxc-ms delete on 18node2
* Resource action: lxc-ms delete on 18node1
* Resource action: lxc2 stop on 18node1
* Resource action: lxc2 delete on 18node3
* Resource action: lxc2 delete on 18node2
* Resource action: lxc2 delete on 18node1
* Resource action: container2 stop on 18node1
* Resource action: container2 delete on 18node3
* Resource action: container2 delete on 18node2
* Resource action: container2 delete on 18node1
* Resource action: lxc1 stop on 18node1
* Resource action: lxc1 delete on 18node3
* Resource action: lxc1 delete on 18node2
* Resource action: lxc1 delete on 18node1
* Resource action: container1 stop on 18node1
* Resource action: container1 delete on 18node3
* Resource action: container1 delete on 18node2
* Resource action: container1 delete on 18node1
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started 18node2
* FencingPass (stonith:fence_dummy): Started 18node3
* FencingFail (stonith:fence_dummy): Started 18node1
* rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
* rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node2
* rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
* migrator (ocf:pacemaker:Dummy): Started 18node1
* Clone Set: Connectivity [ping-1]:
* Started: [ 18node1 18node2 18node3 ]
* Clone Set: master-1 [stateful-1] (promotable):
* Promoted: [ 18node1 ]
* Unpromoted: [ 18node2 18node3 ]
* Resource Group: group-1:
* r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
* r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
* r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-orphaned.summary b/cts/scheduler/summary/whitebox-orphaned.summary
index 08462cdf74..8d5efb48ee 100644
--- a/cts/scheduler/summary/whitebox-orphaned.summary
+++ b/cts/scheduler/summary/whitebox-orphaned.summary
@@ -1,59 +1,59 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node2 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* M (ocf:pacemaker:Dummy): ORPHANED Started lxc1
* Started: [ 18node1 18node2 18node3 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
* container1 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node2
* lxc1 (ocf:pacemaker:remote): ORPHANED Started 18node2
Transition Summary:
* Stop M:4 ( lxc1 ) due to node availability
* Move B ( lxc1 -> lxc2 )
* Stop container1 ( 18node2 ) due to node availability
* Stop lxc1 ( 18node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: M-clone_stop_0
* Resource action: A monitor on lxc2
* Resource action: B stop on lxc1
* Resource action: B monitor on lxc2
* Resource action: D monitor on lxc2
* Cluster action: clear_failcount for container1 on 18node2
* Cluster action: clear_failcount for lxc1 on 18node2
* Resource action: M stop on lxc1
* Pseudo action: M-clone_stopped_0
* Resource action: B start on lxc2
* Resource action: lxc1 stop on 18node2
* Resource action: lxc1 delete on 18node3
* Resource action: lxc1 delete on 18node2
* Resource action: lxc1 delete on 18node1
* Resource action: B monitor=10000 on lxc2
* Resource action: container1 stop on 18node2
* Resource action: container1 delete on 18node3
* Resource action: container1 delete on 18node2
* Resource action: container1 delete on 18node1
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2@18node2 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc2
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-start.summary b/cts/scheduler/summary/whitebox-start.summary
index 8e9b4c3cc3..e17cde17a9 100644
--- a/cts/scheduler/summary/whitebox-start.summary
+++ b/cts/scheduler/summary/whitebox-start.summary
@@ -1,56 +1,56 @@
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2@18node2 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Stopped
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc2 ]
* Stopped: [ lxc1 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc2
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
Transition Summary:
* Start container1 ( 18node1 )
* Start M:4 ( lxc1 )
* Move A ( 18node1 -> lxc1 )
* Move B ( lxc2 -> 18node3 )
* Start lxc1 ( 18node1 )
Executing Cluster Transition:
* Resource action: container1 start on 18node1
* Pseudo action: M-clone_start_0
* Resource action: A monitor on lxc2
* Resource action: B stop on lxc2
* Resource action: D monitor on lxc2
* Resource action: lxc1 start on 18node1
* Resource action: M start on lxc1
* Pseudo action: M-clone_running_0
* Resource action: A stop on 18node1
* Resource action: B start on 18node3
* Resource action: lxc1 monitor=30000 on 18node1
* Resource action: M monitor=10000 on lxc1
* Resource action: A start on lxc1
* Resource action: B monitor=10000 on 18node3
* Resource action: A monitor=10000 on lxc1
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node1 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node1
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started lxc1
* B (ocf:pacemaker:Dummy): Started 18node3
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-stop.summary b/cts/scheduler/summary/whitebox-stop.summary
index 717e042aa3..a7a5e0fceb 100644
--- a/cts/scheduler/summary/whitebox-stop.summary
+++ b/cts/scheduler/summary/whitebox-stop.summary
@@ -1,53 +1,53 @@
1 of 14 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc1@18node2 lxc2@18node2 ]
+ * GuestOnline: [ lxc1 lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Started 18node2 (disabled)
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc1 lxc2 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc1
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
Transition Summary:
* Stop container1 ( 18node2 ) due to node availability
* Stop M:4 ( lxc1 ) due to node availability
* Move B ( lxc1 -> lxc2 )
* Stop lxc1 ( 18node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: M-clone_stop_0
* Resource action: A monitor on lxc2
* Resource action: B stop on lxc1
* Resource action: B monitor on lxc2
* Resource action: D monitor on lxc2
* Resource action: M stop on lxc1
* Pseudo action: M-clone_stopped_0
* Resource action: B start on lxc2
* Resource action: lxc1 stop on 18node2
* Resource action: container1 stop on 18node2
* Resource action: B monitor=10000 on lxc2
Revised Cluster Status:
* Node List:
* Online: [ 18node1 18node2 18node3 ]
- * GuestOnline: [ lxc2@18node2 ]
+ * GuestOnline: [ lxc2 ]
* Full List of Resources:
* container1 (ocf:heartbeat:VirtualDomain): Stopped (disabled)
* container2 (ocf:heartbeat:VirtualDomain): Started 18node2
* shoot1 (stonith:fence_xvm): Started 18node3
* Clone Set: M-clone [M]:
* Started: [ 18node1 18node2 18node3 lxc2 ]
* Stopped: [ lxc1 ]
* A (ocf:pacemaker:Dummy): Started 18node1
* B (ocf:pacemaker:Dummy): Started lxc2
* C (ocf:pacemaker:Dummy): Started lxc2
* D (ocf:pacemaker:Dummy): Started 18node1
diff --git a/cts/scheduler/summary/whitebox-unexpectedly-running.summary b/cts/scheduler/summary/whitebox-unexpectedly-running.summary
index 5e4dbb224e..597349719d 100644
--- a/cts/scheduler/summary/whitebox-unexpectedly-running.summary
+++ b/cts/scheduler/summary/whitebox-unexpectedly-running.summary
@@ -1,35 +1,35 @@
Current cluster status:
* Node List:
* Online: [ 18builder ]
* Full List of Resources:
* FAKE (ocf:pacemaker:Dummy): Started 18builder
* FAKE-crashed (ocf:pacemaker:Dummy): FAILED 18builder
Transition Summary:
* Fence (reboot) remote2 (resource: FAKE-crashed) 'guest is unclean'
* Recover FAKE-crashed ( 18builder )
* Start remote1 ( 18builder )
* Start remote2 ( 18builder )
Executing Cluster Transition:
* Resource action: FAKE monitor=60000 on 18builder
* Resource action: FAKE-crashed stop on 18builder
* Resource action: remote1 monitor on 18builder
* Resource action: remote2 monitor on 18builder
* Pseudo action: stonith-remote2-reboot on remote2
* Resource action: FAKE-crashed start on 18builder
* Resource action: remote1 start on 18builder
* Resource action: remote2 start on 18builder
* Resource action: FAKE-crashed monitor=60000 on 18builder
* Resource action: remote1 monitor=30000 on 18builder
* Resource action: remote2 monitor=30000 on 18builder
Revised Cluster Status:
* Node List:
* Online: [ 18builder ]
- * GuestOnline: [ remote1@18builder remote2@18builder ]
+ * GuestOnline: [ remote1 remote2 ]
* Full List of Resources:
* FAKE (ocf:pacemaker:Dummy): Started 18builder
* FAKE-crashed (ocf:pacemaker:Dummy): Started 18builder
diff --git a/cts/scheduler/summary/year-2038.summary b/cts/scheduler/summary/year-2038.summary
index dc276ada27..edaed2246d 100644
--- a/cts/scheduler/summary/year-2038.summary
+++ b/cts/scheduler/summary/year-2038.summary
@@ -1,112 +1,112 @@
Using the original execution date of: 2038-02-17 06:13:20Z
Current cluster status:
* Node List:
* RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
* overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
* Started: [ overcloud-novacompute-0 ]
* Stopped: [ controller-0 controller-1 controller-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
* stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
Transition Summary:
* Fence (reboot) overcloud-novacompute-1 'remote connection is unrecoverable'
* Stop overcloud-novacompute-1 ( controller-1 ) due to node availability
* Start ip-10.0.0.110 ( controller-1 )
* Recover stonith-fence_compute-fence-nova ( controller-2 )
* Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability
Executing Cluster Transition:
* Resource action: overcloud-novacompute-1 stop on controller-1
* Resource action: stonith-fence_compute-fence-nova stop on controller-2
* Fencing overcloud-novacompute-1 (reboot)
* Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1
* Resource action: ip-10.0.0.110 start on controller-1
* Resource action: stonith-fence_compute-fence-nova start on controller-2
* Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2
* Pseudo action: compute-unfence-trigger-clone_stop_0
* Resource action: ip-10.0.0.110 monitor=10000 on controller-1
* Pseudo action: compute-unfence-trigger_stop_0
* Pseudo action: compute-unfence-trigger-clone_stopped_0
Using the original execution date of: 2038-02-17 06:13:20Z
Revised Cluster Status:
* Node List:
* RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
* Online: [ controller-0 controller-1 controller-2 ]
* RemoteOnline: [ overcloud-novacompute-0 ]
- * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
* overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED
* Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
* Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
* galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
* galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
* Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
* ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
* Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
* haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
* haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
* haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ overcloud-novacompute-0 ]
* Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
* stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
* stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
* stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
* stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
* Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 59fc181142..df6fba6b00 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,3011 +1,3012 @@
/*
* Copyright 2019-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdint.h>
#include <crm/common/xml_internal.h>
#include <crm/common/output.h>
#include <crm/cib/util.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
/* Never display node attributes whose name starts with one of these prefixes */
#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
"shutdown", "terminate", "standby", "#", NULL }
static int
compare_attribute(gconstpointer a, gconstpointer b)
{
int rc;
rc = strcmp((const char *)a, (const char *)b);
return rc;
}
/*!
* \internal
* \brief Determine whether extended information about an attribute should be added.
*
* \param[in] node Node that ran this resource.
* \param[in] rsc_list The list of resources for this node.
* \param[in] attrname The attribute to find.
* \param[out] expected_score The expected value for this attribute.
*
* \return true if extended information should be printed, false otherwise
* \note Currently, extended information is only supported for ping/pingd
* resources, for which a message will be printed if connectivity is lost
* or degraded.
*/
static bool
add_extra_info(pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
const char *attrname, int *expected_score)
{
GList *gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
GHashTable *params = NULL;
if (rsc->children != NULL) {
if (add_extra_info(node, rsc->children, data_set, attrname,
expected_score)) {
return true;
}
}
if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
continue;
}
params = pe_rsc_params(rsc, node, data_set);
name = g_hash_table_lookup(params, "name");
if (name == NULL) {
name = "pingd";
}
/* To identify the resource with the attribute name. */
if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
int host_list_num = 0;
const char *hosts = g_hash_table_lookup(params, "host_list");
const char *multiplier = g_hash_table_lookup(params, "multiplier");
int multiplier_i;
if (hosts) {
char **host_list = g_strsplit(hosts, " ", 0);
host_list_num = g_strv_length(host_list);
g_strfreev(host_list);
}
if ((multiplier == NULL)
|| (pcmk__scan_min_int(multiplier, &multiplier_i,
INT_MIN) != pcmk_rc_ok)) {
/* The ocf:pacemaker:ping resource agent defaults multiplier to
* 1. The agent currently does not handle invalid text, but it
* should, and this would be a reasonable choice ...
*/
multiplier_i = 1;
}
*expected_score = host_list_num * multiplier_i;
return true;
}
}
return false;
}
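/* Illustrative note (not part of this patch): given a ping resource configured
 * with host_list="host1 host2 host3" and multiplier="1000" (example values),
 * the loop above would report *expected_score = 3 * 1000 = 3000, and a lower
 * attribute value would then be reported as lost or degraded connectivity.
 */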
static GList *
filter_attr_list(GList *attr_list, char *name)
{
int i;
const char *filt_str[] = FILTER_STR;
CRM_CHECK(name != NULL, return attr_list);
/* filtering automatic attributes */
for (i = 0; filt_str[i] != NULL; i++) {
if (g_str_has_prefix(name, filt_str[i])) {
return attr_list;
}
}
return g_list_insert_sorted(attr_list, name, compare_attribute);
}
static GList *
get_operation_list(xmlNode *rsc_entry) {
GList *op_list = NULL;
xmlNode *rsc_op = NULL;
for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
rsc_op = pcmk__xe_next(rsc_op)) {
const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(rsc_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
int op_rc_i;
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Ignore notifies and some probes */
if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
continue;
}
if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
op_list = g_list_append(op_list, rsc_op);
}
}
op_list = g_list_sort(op_list, sort_op_by_callid);
return op_list;
}
static void
add_dump_node(gpointer key, gpointer value, gpointer user_data)
{
xmlNodePtr node = user_data;
pcmk_create_xml_text_node(node, (const char *) key, (const char *) value);
}
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
char **dump_text = user_data;
char *new_text = crm_strdup_printf("%s %s=%s",
*dump_text, (char *)key, (char *)value);
free(*dump_text);
*dump_text = new_text;
}
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
data_set->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
static char *
last_changed_string(const char *last_written, const char *user,
const char *client, const char *origin) {
if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
return crm_strdup_printf("%s%s%s%s%s%s%s",
last_written ? last_written : "",
user ? " by " : "",
user ? user : "",
client ? " via " : "",
client ? client : "",
origin ? " on " : "",
origin ? origin : "");
} else {
return strdup("");
}
}
static char *
op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
int rc, bool print_timing) {
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
char *interval_str = NULL;
char *buf = NULL;
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *pair = pcmk__format_nvpair("interval", interval_ms_s, "ms");
interval_str = crm_strdup_printf(" %s", pair);
free(pair);
}
if (print_timing) {
char *last_change_str = NULL;
char *exec_str = NULL;
char *queue_str = NULL;
const char *value = NULL;
time_t epoch = 0;
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk__format_named_time(XML_RSC_OP_LAST_CHANGE, epoch);
last_change_str = crm_strdup_printf(" %s", time);
free(time);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *pair = pcmk__format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
exec_str = crm_strdup_printf(" %s", pair);
free(pair);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *pair = pcmk__format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
queue_str = crm_strdup_printf(" %s", pair);
free(pair);
}
buf = crm_strdup_printf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task,
interval_str ? interval_str : "",
last_change_str ? last_change_str : "",
exec_str ? exec_str : "",
queue_str ? queue_str : "",
rc, services_ocf_exitcode_str(rc));
if (last_change_str) {
free(last_change_str);
}
if (exec_str) {
free(exec_str);
}
if (queue_str) {
free(queue_str);
}
} else {
buf = crm_strdup_printf("(%s) %s%s%s", call, task,
interval_str ? ":" : "",
interval_str ? interval_str : "");
}
if (interval_str) {
free(interval_str);
}
return buf;
}
static char *
resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
int failcount, time_t last_failure) {
char *buf = NULL;
if (rsc == NULL) {
buf = crm_strdup_printf("%s: orphan", rsc_id);
} else if (all || failcount || last_failure > 0) {
char *failcount_s = NULL;
char *lastfail_s = NULL;
if (failcount > 0) {
failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
failcount);
} else {
failcount_s = strdup("");
}
if (last_failure > 0) {
lastfail_s = crm_strdup_printf(" %s='%s'",
PCMK__LAST_FAILURE_PREFIX,
pcmk__epoch2str(&last_failure));
}
buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
rsc_id, rsc->migration_threshold, failcount_s,
lastfail_s? lastfail_s : "");
free(failcount_s);
free(lastfail_s);
} else {
buf = crm_strdup_printf("%s:", rsc_id);
}
return buf;
}
static const char *
get_node_feature_set(pe_node_t *node) {
const char *feature_set = NULL;
if (node->details->online && !pe__is_guest_or_remote_node(node)) {
feature_set = g_hash_table_lookup(node->details->attrs,
CRM_ATTR_FEATURE_SET);
/* The feature set attribute is present since 3.15.1. If it is missing
* then the node must be running an earlier version. */
if (feature_set == NULL) {
feature_set = "<3.15.1";
}
}
return feature_set;
}
static bool
is_mixed_version(pe_working_set_t *data_set) {
const char *feature_set = NULL;
for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = gIter->data;
const char *node_feature_set = get_node_feature_set(node);
if (node_feature_set != NULL) {
if (feature_set == NULL) {
feature_set = node_feature_set;
} else if (strcmp(feature_set, node_feature_set) != 0) {
return true;
}
}
}
return false;
}
static char *
formatted_xml_buf(pe_resource_t *rsc, bool raw)
{
if (raw) {
return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
} else {
return dump_xml_formatted(rsc->xml);
}
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "uint32_t", "uint32_t")
static int
cluster_summary(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
if (pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
bool mixed_version = is_mixed_version(data_set);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
return rc;
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "uint32_t", "uint32_t")
static int
cluster_summary_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
/* Always print DC if none, even if not requested */
if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
bool mixed_version = is_mixed_version(data_set);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
/* Kind of a hack - close the list we may have opened earlier in this
* function so we can put all the options into their own list. We
* only want to do this on HTML output, though.
*/
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
return rc;
}
char *
pe__node_display_name(pe_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
const char *node_id = NULL;
int name_len;
CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
- /* Host is displayed only if this is a guest node */
- if (pe__is_guest_node(node)) {
- pe_node_t *host_node = pe__current_node(node->details->remote_rsc);
+ /* Host is displayed only if this is a guest node and detail is requested */
+ if (print_detail && pe__is_guest_node(node)) {
+ const pe_resource_t *container = node->details->remote_rsc->container;
+ const pe_node_t *host_node = pe__current_node(container);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
}
if (node_host == NULL) {
node_host = ""; /* so we at least get "uname@" to indicate guest */
}
}
/* Node ID is displayed if different from uname and detail is requested */
if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
node_id = node->details->id;
}
/* Determine name length */
name_len = strlen(node->details->uname) + 1;
if (node_host) {
name_len += strlen(node_host) + 1; /* "@node_host" */
}
if (node_id) {
name_len += strlen(node_id) + 3; /* + " (node_id)" */
}
/* Allocate and populate display name */
node_name = malloc(name_len);
CRM_ASSERT(node_name != NULL);
strcpy(node_name, node->details->uname);
if (node_host) {
strcat(node_name, "@");
strcat(node_name, node_host);
}
if (node_id) {
strcat(node_name, " (");
strcat(node_name, node_id);
strcat(node_name, ")");
}
return node_name;
}
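/*!
* \internal
* \brief Create an XML element under the current parent and add name/value pairs
*
* \param[in,out] out          Output object
* \param[in]     is_list      If true, push the new element as the current parent
* \param[in]     tag_name     Name of the element to create
* \param[in]     pairs_count  Number of attribute name/value string pairs that follow
* \param[in]     ...          \p pairs_count pairs of attribute name and value;
*                             pairs with a NULL name or value are skipped
*
* \return Standard Pacemaker return code (currently always pcmk_rc_ok)
*/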
int
pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...)
{
xmlNodePtr xml_node = NULL;
va_list args;
CRM_ASSERT(tag_name != NULL);
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
xml_node = is_list
? create_xml_node(xml_node, tag_name)
: xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
va_start(args, pairs_count);
while (pairs_count--) {
const char *param_name = va_arg(args, const char *);
const char *param_value = va_arg(args, const char *);
if (param_name && param_value) {
crm_xml_add(xml_node, param_name, param_value);
}
}
va_end(args);
if (is_list) {
pcmk__output_xml_push_parent(out, xml_node);
}
return pcmk_rc_ok;
}
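/*!
* \internal
* \brief Return a phrase describing a ban's role restriction
*
* \param[in] role  Role the ban is limited to
*
* \return A phrase suitable for insertion into ban messages when the
*         constraint applies only to the promoted role (legacy wording when
*         built for 2.0 compatibility), otherwise an empty string
*/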
static const char *
role_desc(enum rsc_role_e role)
{
if (role == RSC_ROLE_PROMOTED) {
#ifdef PCMK__COMPAT_2_0
return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
#else
return "in " RSC_ROLE_PROMOTED_S " role ";
#endif
}
return "";
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
static int
ban_html(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
char *node_name = pe__node_display_name(pe_node,
pcmk_is_set(show_opts, pcmk_show_node_id));
char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
role_desc(location->role_filter), node_name);
pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
free(node_name);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
static int
ban_text(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
char *node_name = pe__node_display_name(pe_node,
pcmk_is_set(show_opts, pcmk_show_node_id));
out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
role_desc(location->role_filter), node_name);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
static int
ban_xml(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
char *weight_s = pcmk__itoa(pe_node->weight);
pcmk__output_create_xml_node(out, "ban",
"id", location->id,
"resource", location->rsc_lh->id,
"node", pe_node->details->uname,
"weight", weight_s,
"promoted-only", promoted_only,
/* This is a deprecated alias for
* promoted_only. Removing it will break
* backward compatibility of the API schema,
* which will require an API schema major
* version bump.
*/
"master_only", promoted_only,
NULL);
free(weight_s);
return pcmk_rc_ok;
}
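/* Formatter for the "ban-list" message: prints a "Negative Location
* Constraints" section with one entry per negatively weighted node of each
* location constraint that matches the optional ID prefix and resource list.
*/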
PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
"uint32_t", "bool")
static int
ban_list(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
const char *prefix = va_arg(args, const char *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
GList *gIter, *gIter2;
int rc = pcmk_rc_no_output;
/* Print each ban */
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
continue;
}
if (!pcmk__str_in_list(rsc_printable_id(location->rsc_lh), only_rsc, pcmk__str_star_matches) &&
!pcmk__str_in_list(rsc_printable_id(uber_parent(location->rsc_lh)), only_rsc, pcmk__str_star_matches)) {
continue;
}
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
pe_node_t *node = (pe_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
out->message(out, "ban", node, location, show_opts);
}
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
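/* Formatters for the "cluster-counts" message: how many nodes and resource
* instances are configured, including counts of instances that are disabled
* or blocked from further action due to failure.
*/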
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
static int
cluster_counts_html(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
char *nnodes_str = crm_strdup_printf("%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
free(nnodes_str);
if (ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
s = crm_strdup_printf(", %d ", nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else if (ndisabled && !nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
} else if (!ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else {
char *s = crm_strdup_printf("%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
static int
cluster_counts_text(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
out->list_item(out, NULL, "%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
if (ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED, %d BLOCKED from "
"further action due to failure)",
nresources, pcmk__plural_s(nresources), ndisabled,
nblocked);
} else if (ndisabled && !nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED)",
nresources, pcmk__plural_s(nresources), ndisabled);
} else if (!ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d BLOCKED from further action "
"due to failure)",
nresources, pcmk__plural_s(nresources), nblocked);
} else {
out->list_item(out, NULL, "%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
static int
cluster_counts_xml(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
char *s = pcmk__itoa(nnodes);
crm_xml_add(nodes_node, "number", s);
free(s);
s = pcmk__itoa(nresources);
crm_xml_add(resources_node, "number", s);
free(s);
s = pcmk__itoa(ndisabled);
crm_xml_add(resources_node, "disabled", s);
free(s);
s = pcmk__itoa(nblocked);
crm_xml_add(resources_node, "blocked", s);
free(s);
return pcmk_rc_ok;
}
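/* Formatters for the "cluster-dc" message: which node is the current DC (if
* any), the DC version, whether the partition has quorum, and a MIXED-VERSION
* marker when applicable.
*/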
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_html(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
bool mixed_version = va_arg(args, int);
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
if (dc) {
char *buf = crm_strdup_printf("%s (version %s) -", dc_name,
dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
if (mixed_version) {
pcmk_create_html_node(node, "span", NULL, "warning",
" MIXED-VERSION");
}
pcmk_create_html_node(node, "span", NULL, NULL, " partition");
if (crm_is_true(quorum)) {
pcmk_create_html_node(node, "span", NULL, NULL, " with");
} else {
pcmk_create_html_node(node, "span", NULL, "warning", " WITHOUT");
}
pcmk_create_html_node(node, "span", NULL, NULL, " quorum");
} else {
pcmk_create_html_node(node, "span", NULL, "warning", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_text(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
bool mixed_version = va_arg(args, int);
if (dc) {
out->list_item(out, "Current DC",
"%s (version %s) - %spartition %s quorum",
dc_name, dc_version_s ? dc_version_s : "unknown",
mixed_version ? "MIXED-VERSION " : "",
crm_is_true(quorum) ? "with" : "WITHOUT");
} else {
out->list_item(out, "Current DC", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_xml(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
bool mixed_version = va_arg(args, int);
if (dc) {
pcmk__output_create_xml_node(out, "current_dc",
"present", "true",
"version", dc_version_s ? dc_version_s : "",
"name", dc->details->uname,
"id", dc->details->id,
"with_quorum", pcmk__btoa(crm_is_true(quorum)),
"mixed_version", pcmk__btoa(mixed_version),
NULL);
} else {
pcmk__output_create_xml_node(out, "current_dc",
"present", "false",
NULL);
}
return pcmk_rc_ok;
}
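/* Text formatter for the "maint-mode" message: prints a banner when resource
* management is disabled (maintenance mode or stop-all-resources), and
* produces no output otherwise.
*/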
PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
static int
cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
unsigned long long flags = va_arg(args, unsigned long long);
if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
return pcmk_rc_ok;
} else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n");
return pcmk_rc_ok;
} else {
return pcmk_rc_no_output;
}
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
static int
cluster_options_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
} else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will keep all resources stopped)");
} else {
out->list_item(out, NULL, "Resource management: enabled");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
static int
cluster_options_log(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
} else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
} else {
return pcmk_rc_no_output;
}
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
static int
cluster_options_text(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
static int
cluster_options_xml(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
const char *no_quorum_policy = NULL;
char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
no_quorum_policy = "freeze";
break;
case no_quorum_stop:
no_quorum_policy = "stop";
break;
case no_quorum_demote:
no_quorum_policy = "demote";
break;
case no_quorum_ignore:
no_quorum_policy = "ignore";
break;
case no_quorum_suicide:
no_quorum_policy = "suicide";
break;
}
pcmk__output_create_xml_node(out, "cluster_options",
"stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
"symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
"no-quorum-policy", no_quorum_policy,
"maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
"stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
"stonith-timeout-ms", stonith_timeout_str,
"priority-fencing-delay-ms", priority_fencing_delay_str,
NULL);
free(stonith_timeout_str);
free(priority_fencing_delay_str);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
static int
cluster_stack_html(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
static int
cluster_stack_text(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
out->list_item(out, "Stack", "%s", stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
static int
cluster_stack_xml(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
pcmk__output_create_xml_node(out, "stack",
"type", stack_s,
NULL);
return pcmk_rc_ok;
}
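/* Formatters for the "cluster-times" message: "Last updated" is the time the
* status output was generated (the current time), while "Last change" reports
* the CIB's last-written time along with the user, client, and origin of the
* change.
*/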
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
static int
cluster_times_html(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
char *buf = last_changed_string(last_written, user, client, origin);
pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
pcmk_create_html_node(updated_node, "span", NULL, NULL,
pcmk__epoch2str(NULL));
pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
pcmk_create_html_node(changed_node, "span", NULL, NULL, buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
static int
cluster_times_xml(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
pcmk__output_create_xml_node(out, "last_update",
"time", pcmk__epoch2str(NULL),
NULL);
pcmk__output_create_xml_node(out, "last_change",
"time", last_written ? last_written : "",
"user", user ? user : "",
"client", client ? client : "",
"origin", origin ? origin : "",
NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
static int
cluster_times_text(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
out->list_item(out, "Last updated", "%s", pcmk__epoch2str(NULL));
out->list_item(out, "Last change", " %s", buf);
free(buf);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Display a failed action in less-technical natural language
*/
static void
failed_action_friendly(pcmk__output_t *out, xmlNodePtr xml_op,
const char *op_key, const char *node_name, int rc,
int status, const char *exit_reason,
const char *exec_time)
{
char *rsc_id = NULL;
char *task = NULL;
guint interval_ms = 0;
const char *last_change_str = NULL;
time_t last_change_epoch = 0;
GString *str = NULL;
if (pcmk__str_empty(op_key)
|| !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) {
rsc_id = strdup("unknown resource");
task = strdup("unknown action");
interval_ms = 0;
}
CRM_ASSERT((rsc_id != NULL) && (task != NULL));
str = g_string_sized_new(256); // Should be sufficient for most messages
g_string_printf(str, "%s ", rsc_id);
if (interval_ms != 0) {
g_string_append_printf(str, "%s-interval ",
pcmk__readable_interval(interval_ms));
}
g_string_append_printf(str, "%s on %s",
crm_action_str(task, interval_ms), node_name);
if (status == PCMK_EXEC_DONE) {
g_string_append_printf(str, " returned '%s'",
services_ocf_exitcode_str(rc));
if (!pcmk__str_empty(exit_reason)) {
g_string_append_printf(str, " (%s)", exit_reason);
}
} else {
g_string_append_printf(str, " could not be executed (%s",
pcmk_exec_status_str(status));
if (!pcmk__str_empty(exit_reason)) {
g_string_append_printf(str, ": %s", exit_reason);
}
g_string_append(str, ")");
}
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change_epoch) == pcmk_ok) {
last_change_str = pcmk__epoch2str(&last_change_epoch);
if (last_change_str != NULL) {
g_string_append_printf(str, " at %s", last_change_str);
}
}
if (!pcmk__str_empty(exec_time)) {
int exec_time_ms = 0;
if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok)
&& (exec_time_ms > 0)) {
g_string_append_printf(str, " after %s",
pcmk__readable_interval(exec_time_ms));
}
}
out->list_item(out, NULL, "%s", str->str);
g_string_free(str, TRUE);
free(rsc_id);
free(task);
}
/*!
* \internal
* \brief Display a failed action with technical details
*/
static void
failed_action_technical(pcmk__output_t *out, xmlNodePtr xml_op,
const char *op_key, const char *node_name, int rc,
int status, const char *exit_reason,
const char *exec_time)
{
const char *call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
const char *queue_time = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
const char *exit_status = services_ocf_exitcode_str(rc);
const char *lrm_status = pcmk_exec_status_str(status);
const char *last_change_str = NULL;
time_t last_change_epoch = 0;
GString *str = NULL;
if (pcmk__str_empty(op_key)) {
op_key = "unknown operation";
}
if (pcmk__str_empty(exit_status)) {
exit_status = "unknown exit status";
}
if (pcmk__str_empty(call_id)) {
call_id = "unknown";
}
str = g_string_sized_new(strlen(op_key) + strlen(node_name)
+ strlen(exit_status) + strlen(call_id)
+ strlen(lrm_status) + 50); // rough estimate
g_string_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'",
op_key, node_name, exit_status, rc, call_id, lrm_status);
if (!pcmk__str_empty(exit_reason)) {
g_string_append_printf(str, ", exitreason='%s'", exit_reason);
}
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change_epoch) == pcmk_ok) {
last_change_str = pcmk__epoch2str(&last_change_epoch);
if (last_change_str != NULL) {
g_string_append_printf(str, ", " XML_RSC_OP_LAST_CHANGE "='%s'",
last_change_str);
}
}
if (!pcmk__str_empty(queue_time)) {
g_string_append_printf(str, ", queued=%sms", queue_time);
}
if (!pcmk__str_empty(exec_time)) {
g_string_append_printf(str, ", exec=%sms", exec_time);
}
out->list_item(out, NULL, "%s", str->str);
g_string_free(str, TRUE);
}
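/* Default (text) formatter for the "failed-action" message: chooses the
* detailed technical form when pcmk_show_failed_detail is set, and the
* friendlier prose form otherwise.
*/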
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
static int
failed_action_default(pcmk__output_t *out, va_list args)
{
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
uint32_t show_opts = va_arg(args, uint32_t);
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
const char *node_name = crm_element_value(xml_op, XML_ATTR_UNAME);
const char *exit_reason = crm_element_value(xml_op,
XML_LRM_ATTR_EXIT_REASON);
const char *exec_time = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
int rc;
int status;
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, 0);
if (pcmk__str_empty(op_key)) {
op_key = ID(xml_op);
}
if (pcmk__str_empty(node_name)) {
node_name = "unknown node";
}
if (pcmk_is_set(show_opts, pcmk_show_failed_detail)) {
failed_action_technical(out, xml_op, op_key, node_name, rc, status,
exit_reason, exec_time);
} else {
failed_action_friendly(out, xml_op, op_key, node_name, rc, status,
exit_reason, exec_time);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
static int
failed_action_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int rc;
int status;
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
time_t epoch = 0;
char *rc_s = NULL;
char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
xmlNodePtr node = NULL;
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, 0);
rc_s = pcmk__itoa(rc);
node = pcmk__output_create_xml_node(out, "failure",
(op_key == NULL)? "id" : "op_key",
(op_key == NULL)? ID(xml_op) : op_key,
"node", crm_element_value(xml_op, XML_ATTR_UNAME),
"exitstatus", services_ocf_exitcode_str(rc),
"exitreason", pcmk__s(reason_s, ""),
"exitcode", rc_s,
"call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
"status", pcmk_exec_status_str(status),
NULL);
free(rc_s);
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&epoch) == pcmk_ok) && (epoch > 0)) {
guint interval_ms = 0;
char *s = NULL;
crm_time_t *crm_when = crm_time_new_undefined();
char *rc_change = NULL;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
s = pcmk__itoa(interval_ms);
crm_time_set_timet(crm_when, &epoch);
rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change,
"queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
"exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
"interval", s,
"task", crm_element_value(xml_op, XML_LRM_ATTR_TASK),
NULL);
free(s);
free(rc_change);
crm_time_free(crm_when);
}
free(reason_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
"GList *", "uint32_t", "bool")
static int
failed_action_list(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
const char *id = NULL;
if (xmlChildElementCount(data_set->failed) == 0) {
return rc;
}
for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
char *rsc = NULL;
if (!pcmk__str_in_list(crm_element_value(xml_op, XML_ATTR_UNAME), only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
continue;
}
if (pcmk_xe_mask_probe_failure(xml_op)) {
continue;
}
id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
if (!parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL)) {
continue;
}
if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) {
free(rsc);
continue;
}
free(rsc);
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
out->message(out, "failed-action", xml_op, show_opts);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
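/*!
* \internal
* \brief Append HTML spans describing a node's status to a parent element
*
* This covers cluster membership (online/offline), standby and maintenance
* state, node health (RED/YELLOW), and, if requested, the node's feature set.
*/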
static void
status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
{
int health = pe__node_health(node);
// Cluster membership
if (node->details->online) {
pcmk_create_html_node(parent, "span", NULL, "online", " online");
} else {
pcmk_create_html_node(parent, "span", NULL, "offline", " OFFLINE");
}
// Standby mode
if (node->details->standby_onfail && (node->details->running_rsc != NULL)) {
pcmk_create_html_node(parent, "span", NULL, "standby",
" (in standby due to on-fail,"
" with active resources)");
} else if (node->details->standby_onfail) {
pcmk_create_html_node(parent, "span", NULL, "standby",
" (in standby due to on-fail)");
} else if (node->details->standby && (node->details->running_rsc != NULL)) {
pcmk_create_html_node(parent, "span", NULL, "standby",
" (in standby, with active resources)");
} else if (node->details->standby) {
pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby)");
}
// Maintenance mode
if (node->details->maintenance) {
pcmk_create_html_node(parent, "span", NULL, "maint",
" (in maintenance mode)");
}
// Node health
if (health < 0) {
pcmk_create_html_node(parent, "span", NULL, "health_red",
" (health is RED)");
} else if (health == 0) {
pcmk_create_html_node(parent, "span", NULL, "health_yellow",
" (health is YELLOW)");
}
// Feature set
if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
const char *feature_set = get_node_feature_set(node);
if (feature_set != NULL) {
char *buf = crm_strdup_printf(", feature set %s", feature_set);
pcmk_create_html_node(parent, "span", NULL, NULL, buf);
free(buf);
}
}
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
"GList *", "GList *")
static int
node_html(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
if (full) {
xmlNodePtr item_node;
if (pcmk_all_flags_set(show_opts, pcmk_show_brief | pcmk_show_rscs_by_node)) {
GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
out->begin_list(out, NULL, NULL, "%s:", node_name);
item_node = pcmk__output_xml_create_parent(out, "li", NULL);
pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
status_node(node, item_node, show_opts);
if (rscs != NULL) {
uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
out->begin_list(out, NULL, NULL, "Resources");
pe__rscs_brief_output(out, rscs, new_show_opts);
out->end_list(out);
}
pcmk__output_xml_pop_parent(out);
out->end_list(out);
} else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
GList *lpc2 = NULL;
int rc = pcmk_rc_no_output;
out->begin_list(out, NULL, NULL, "%s:", node_name);
item_node = pcmk__output_xml_create_parent(out, "li", NULL);
pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
status_node(node, item_node, show_opts);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
rsc, only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
pcmk__output_xml_pop_parent(out);
out->end_list(out);
} else {
char *buf = crm_strdup_printf("%s:", node_name);
item_node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
status_node(node, item_node, show_opts);
free(buf);
}
} else {
out->begin_list(out, NULL, NULL, "%s:", node_name);
}
free(node_name);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Get a human-friendly textual description of a node's status
*
* \param[in] node Node to check
*
* \return String representation of node's status
*/
static const char *
node_text_status(pe_node_t *node)
{
if (node->details->unclean) {
if (node->details->online) {
return "UNCLEAN (online)";
} else if (node->details->pending) {
return "UNCLEAN (pending)";
} else {
return "UNCLEAN (offline)";
}
} else if (node->details->pending) {
return "pending";
} else if (node->details->standby_onfail && node->details->online) {
return "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
if (node->details->running_rsc) {
return "standby (with active resources)";
} else {
return "standby";
}
} else {
return "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
return "maintenance";
} else {
return "OFFLINE (maintenance)";
}
} else if (node->details->online) {
return "online";
}
return "OFFLINE";
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
static int
node_text(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
if (full) {
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
GString *str = g_string_sized_new(64);
int health = pe__node_health(node);
// Create a summary line with node type, name, and status
if (pe__is_guest_node(node)) {
g_string_append(str, "GuestNode");
} else if (pe__is_remote_node(node)) {
g_string_append(str, "RemoteNode");
} else {
g_string_append(str, "Node");
}
g_string_append_printf(str, " %s: %s",
node_name, node_text_status(node));
if (health < 0) {
g_string_append(str, " (health is RED)");
} else if (health == 0) {
g_string_append(str, " (health is YELLOW)");
}
if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
const char *feature_set = get_node_feature_set(node);
if (feature_set != NULL) {
g_string_append_printf(str, ", feature set %s", feature_set);
}
}
/* If we're grouping by node, print its resources */
if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
if (pcmk_is_set(show_opts, pcmk_show_brief)) {
GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
if (rscs != NULL) {
uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
out->begin_list(out, NULL, NULL, "%s", str->str);
out->begin_list(out, NULL, NULL, "Resources");
pe__rscs_brief_output(out, rscs, new_show_opts);
out->end_list(out);
out->end_list(out);
g_list_free(rscs);
}
} else {
GList *gIter2 = NULL;
out->begin_list(out, NULL, NULL, "%s", str->str);
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
rsc, only_node, only_rsc);
}
out->end_list(out);
out->end_list(out);
}
} else {
out->list_item(out, NULL, "%s", str->str);
}
g_string_free(str, TRUE);
free(node_name);
} else {
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
out->begin_list(out, NULL, NULL, "Node: %s", node_name);
free(node_name);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
static int
node_xml(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
if (full) {
const char *node_type = "unknown";
char *length_s = pcmk__itoa(g_list_length(node->details->running_rsc));
int health = pe__node_health(node);
const char *health_s = NULL;
const char *feature_set;
switch (node->details->type) {
case node_member:
node_type = "member";
break;
case node_remote:
node_type = "remote";
break;
case node_ping:
node_type = "ping";
break;
}
if (health < 0) {
health_s = "red";
} else if (health == 0) {
health_s = "yellow";
} else {
health_s = "green";
}
feature_set = get_node_feature_set(node);
pe__name_and_nvpairs_xml(out, true, "node", 15,
"name", node->details->uname,
"id", node->details->id,
"online", pcmk__btoa(node->details->online),
"standby", pcmk__btoa(node->details->standby),
"standby_onfail", pcmk__btoa(node->details->standby_onfail),
"maintenance", pcmk__btoa(node->details->maintenance),
"pending", pcmk__btoa(node->details->pending),
"unclean", pcmk__btoa(node->details->unclean),
"health", health_s,
"feature_set", feature_set,
"shutdown", pcmk__btoa(node->details->shutdown),
"expected_up", pcmk__btoa(node->details->expected_up),
"is_dc", pcmk__btoa(node->details->is_dc),
"resources_running", length_s,
"type", node_type);
if (pe__is_guest_node(node)) {
xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id);
}
if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
GList *lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
rsc, only_node, only_rsc);
}
}
free(length_s);
out->end_list(out);
} else {
pcmk__output_xml_create_parent(out, "node",
"name", node->details->uname,
NULL);
}
return pcmk_rc_ok;
}
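/* Formatters for the "node-attribute" message: print one node attribute and
* its value. When add_extra is set, the value is interpreted as a
* connectivity score: a non-positive value means connectivity is lost, and a
* value below expected_score means it is degraded.
*/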
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
static int
node_attribute_text(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
bool add_extra = va_arg(args, int);
int expected_score = va_arg(args, int);
if (add_extra) {
int v;
if (value == NULL) {
v = 0;
} else {
pcmk__scan_min_int(value, &v, INT_MIN);
}
if (v <= 0) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
} else if (v < expected_score) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
static int
node_attribute_html(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
bool add_extra = va_arg(args, int);
int expected_score = va_arg(args, int);
if (add_extra) {
int v;
char *s = crm_strdup_printf("%s: %s", name, value);
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
if (value == NULL) {
v = 0;
} else {
pcmk__scan_min_int(value, &v, INT_MIN);
}
pcmk_create_html_node(item_node, "span", NULL, NULL, s);
free(s);
if (v <= 0) {
pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
} else if (v < expected_score) {
char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
free(buf);
}
} else {
out->list_item(out, NULL, "%s: %s", name, value);
}
return pcmk_rc_ok;
}
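/* Formatters for the "node-and-op" message: one recorded operation, prefixed
* with a description of the resource it belongs to (or "Unknown resource" if
* the resource cannot be found), plus the node, call ID, return code, and
* execution status.
*/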
PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
static int
node_and_op(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
pe_resource_t *rsc = NULL;
gchar *node_str = NULL;
char *last_change_str = NULL;
const char *op_rsc = crm_element_value(xml_op, "resource");
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int status;
time_t last_change = 0;
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, PCMK_EXEC_UNKNOWN);
rsc = pe_find_resource(data_set->resources, op_rsc);
if (rsc) {
pe_node_t *node = pe__current_node(rsc);
const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
if (node == NULL) {
node = rsc->pending_node;
}
node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
show_opts, target_role, false);
} else {
node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
}
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change) == pcmk_ok) {
last_change_str = crm_strdup_printf(", %s=%s, exec=%sms",
XML_RSC_OP_LAST_CHANGE,
pcmk__trim(ctime(&last_change)),
crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
}
out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
node_str, op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
crm_element_value(xml_op, XML_LRM_ATTR_RC),
last_change_str ? last_change_str : "",
pcmk_exec_status_str(status));
g_free(node_str);
free(last_change_str);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
static int
node_and_op_xml(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
pe_resource_t *rsc = NULL;
const char *op_rsc = crm_element_value(xml_op, "resource");
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int status;
time_t last_change = 0;
xmlNode *node = NULL;
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, PCMK_EXEC_UNKNOWN);
node = pcmk__output_create_xml_node(out, "operation",
"op", op_key ? op_key : ID(xml_op),
"node", crm_element_value(xml_op, XML_ATTR_UNAME),
"call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
"rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
"status", pcmk_exec_status_str(status),
NULL);
rsc = pe_find_resource(data_set->resources, op_rsc);
if (rsc) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
char *agent_tuple = NULL;
agent_tuple = crm_strdup_printf("%s:%s:%s", class,
pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "",
kind);
pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc),
"agent", agent_tuple,
NULL);
free(agent_tuple);
}
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change) == pcmk_ok) {
pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE,
pcmk__trim(ctime(&last_change)),
XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
NULL);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
static int
node_attribute_xml(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
bool add_extra = va_arg(args, int);
int expected_score = va_arg(args, int);
xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute",
"name", name,
"value", value,
NULL);
if (add_extra) {
char *buf = pcmk__itoa(expected_score);
crm_xml_add(node, "expected", buf);
free(buf);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
"bool", "GList *", "GList *")
static int
node_attribute_list(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
int rc = pcmk_rc_no_output;
/* Display each node's attributes */
for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = gIter->data;
GList *attr_list = NULL;
GHashTableIter iter;
gpointer key;
if (!node || !node->details || !node->details->online) {
continue;
}
g_hash_table_iter_init(&iter, node->details->attrs);
while (g_hash_table_iter_next (&iter, &key, NULL)) {
attr_list = filter_attr_list(attr_list, key);
}
if (attr_list == NULL) {
continue;
}
if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) {
g_list_free(attr_list);
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
out->message(out, "node", node, show_opts, false, only_node, only_rsc);
for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) {
const char *name = aIter->data;
const char *value = NULL;
int expected_score = 0;
bool add_extra = false;
value = pe_node_attribute_raw(node, name);
add_extra = add_extra_info(node, node->details->running_rsc,
data_set, name, &expected_score);
/* Print attribute name and value */
out->message(out, "node-attribute", name, value, add_extra,
expected_score);
}
g_list_free(attr_list);
out->end_list(out);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-capacity", "pe_node_t *", "const char *")
static int
node_capacity(pcmk__output_t *out, va_list args)
{
pe_node_t *node = va_arg(args, pe_node_t *);
const char *comment = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s capacity:",
comment, pe__node_name(node));
g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
out->list_item(out, NULL, "%s", dump_text);
free(dump_text);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-capacity", "pe_node_t *", "const char *")
static int
node_capacity_xml(pcmk__output_t *out, va_list args)
{
pe_node_t *node = va_arg(args, pe_node_t *);
const char *comment = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
"node", node->details->uname,
"comment", comment,
NULL);
g_hash_table_foreach(node->details->utilization, add_dump_node, xml_node);
return pcmk_rc_ok;
}
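/* Formatter for the "node-history-list" message: walks the lrm_resource
* entries in a node_state element. Without pcmk_section_operations, it prints
* a resource-history line only for resources with a positive fail count;
* with it, it prints each resource's full operation list.
*/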
PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
"GList *", "GList *", "uint32_t", "uint32_t")
static int
node_history_list(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
xmlNode *node_state = va_arg(args, xmlNode *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
xmlNode *lrm_rsc = NULL;
xmlNode *rsc_entry = NULL;
int rc = pcmk_rc_no_output;
lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
/* Print history of each of the node's resources */
for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
/* We can't use is_filtered here to filter group resources. For is_filtered,
* we have to decide whether to check the parent or not. If we check the
* parent, all elements of a group will always be printed because that's how
* is_filtered works for groups. If we do not check the parent, sometimes
* this will filter everything out.
*
* For other resource types, is_filtered is okay.
*/
if (uber_parent(rsc)->variant == pe_group) {
if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) &&
!pcmk__str_in_list(rsc_printable_id(uber_parent(rsc)), only_rsc, pcmk__str_star_matches)) {
continue;
}
} else {
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
continue;
}
}
if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
time_t last_failure = 0;
int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
NULL, data_set);
if (failcount <= 0) {
continue;
}
if (rc == pcmk_rc_no_output) {
rc = pcmk_rc_ok;
out->message(out, "node", node, show_opts, false, only_node,
only_rsc);
}
out->message(out, "resource-history", rsc, rsc_id, false,
failcount, last_failure, false);
} else {
GList *op_list = get_operation_list(rsc_entry);
pe_resource_t *rsc = pe_find_resource(data_set->resources,
crm_element_value(rsc_entry, XML_ATTR_ID));
if (op_list == NULL) {
continue;
}
if (rc == pcmk_rc_no_output) {
rc = pcmk_rc_ok;
out->message(out, "node", node, show_opts, false, only_node,
only_rsc);
}
out->message(out, "resource-operation-list", data_set, rsc, node,
op_list, show_opts);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
static int
node_list_html(pcmk__output_t *out, va_list args) {
GList *nodes = va_arg(args, GList *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List");
out->message(out, "node", node, show_opts, true, only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
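/* Text formatter for the "node-list" message: plain online/offline nodes are
* collected into bracketed summary lines (Online, OFFLINE, RemoteOnline,
* RemoteOFFLINE, GuestOnline), while nodes that are unclean, pending, in
* standby or maintenance, unhealthy, or shown with their resources are
* printed individually.
*/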
PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
static int
node_list_text(pcmk__output_t *out, va_list args) {
GList *nodes = va_arg(args, GList *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
/* space-separated lists of node names */
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_guest_nodes = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
size_t online_nodes_len = 0;
size_t online_remote_nodes_len = 0;
size_t online_guest_nodes_len = 0;
size_t offline_nodes_len = 0;
size_t offline_remote_nodes_len = 0;
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
free(node_name);
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List");
// Determine whether to display node individually or in a list
if (node->details->unclean || node->details->pending
|| (node->details->standby_onfail && node->details->online)
|| node->details->standby || node->details->maintenance
|| pcmk_is_set(show_opts, pcmk_show_rscs_by_node)
|| pcmk_is_set(show_opts, pcmk_show_feature_set)
|| (pe__node_health(node) <= 0)) {
// Display node individually
} else if (node->details->online) {
// Display online node in a list
if (pe__is_guest_node(node)) {
pcmk__add_word(&online_guest_nodes,
&online_guest_nodes_len, node_name);
} else if (pe__is_remote_node(node)) {
pcmk__add_word(&online_remote_nodes,
&online_remote_nodes_len, node_name);
} else {
pcmk__add_word(&online_nodes, &online_nodes_len, node_name);
}
free(node_name);
continue;
} else {
// Display offline node in a list
if (pe__is_remote_node(node)) {
pcmk__add_word(&offline_remote_nodes,
&offline_remote_nodes_len, node_name);
} else if (pe__is_guest_node(node)) {
/* ignore offline guest nodes */
} else {
pcmk__add_word(&offline_nodes,
&offline_nodes_len, node_name);
}
free(node_name);
continue;
}
/* If we get here, node is in bad state, or we're grouping by node */
out->message(out, "node", node, show_opts, true, only_node, only_rsc);
free(node_name);
}
/* If we're not grouping by node, summarize nodes by status */
if (online_nodes) {
out->list_item(out, "Online", "[ %s ]", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
out->list_item(out, "OFFLINE", "[ %s ]", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
out->list_item(out, "RemoteOnline", "[ %s ]", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
out->list_item(out, "RemoteOFFLINE", "[ %s ]", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_guest_nodes) {
out->list_item(out, "GuestOnline", "[ %s ]", online_guest_nodes);
free(online_guest_nodes);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
static int
node_list_xml(pcmk__output_t *out, va_list args) {
GList *nodes = va_arg(args, GList *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
out->begin_list(out, NULL, NULL, "nodes");
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
continue;
}
out->message(out, "node", node, show_opts, true, only_node, only_rsc);
}
out->end_list(out);
return pcmk_rc_ok;
}
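/* Formatter for the "node-summary" message: walks node_state entries in the
* CIB status section and emits a node-history-list for each matching online
* node, under an "Operations" or "Migration Summary" header depending on
* section_opts.
*/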
PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
"uint32_t", "uint32_t", "bool")
static int
node_summary(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
xmlNode *node_state = NULL;
xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
XML_CIB_TAG_STATUS);
int rc = pcmk_rc_no_output;
if (xmlChildElementCount(cib_status) == 0) {
return rc;
}
for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
node_state != NULL; node_state = crm_next_same_xml(node_state)) {
pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
}
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
out->message(out, "node-history-list", data_set, node, node_state,
only_node, only_rsc, section_opts, show_opts);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("node-weight", "pe_resource_t *", "const char *", "const char *", "const char *")
static int
node_weight(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
if (rsc) {
out->list_item(out, NULL, "%s: %s allocation score on %s: %s",
prefix, rsc->id, uname, score);
} else {
out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-weight", "pe_resource_t *", "const char *", "const char *", "const char *")
static int
node_weight_xml(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "node_weight",
"function", prefix,
"node", uname,
"score", score,
NULL);
if (rsc) {
crm_xml_add(node, "id", rsc->id);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
static int
op_history_text(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
uint32_t show_opts = va_arg(args, uint32_t);
char *buf = op_history_string(xml_op, task, interval_ms_s, rc,
pcmk_is_set(show_opts, pcmk_show_timing));
out->list_item(out, NULL, "%s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
static int
op_history_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
uint32_t show_opts = va_arg(args, uint32_t);
char *rc_s = pcmk__itoa(rc);
xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history",
"call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
"task", task,
"rc", rc_s,
"rc_text", services_ocf_exitcode_str(rc),
NULL);
free(rc_s);
if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
char *s = crm_strdup_printf("%sms", interval_ms_s);
crm_xml_add(node, "interval", s);
free(s);
}
if (pcmk_is_set(show_opts, pcmk_show_timing)) {
const char *value = NULL;
time_t epoch = 0;
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&epoch) == pcmk_ok) && (epoch > 0)) {
crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, pcmk__epoch2str(&epoch));
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *s = crm_strdup_printf("%sms", value);
crm_xml_add(node, XML_RSC_OP_T_EXEC, s);
free(s);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *s = crm_strdup_printf("%sms", value);
crm_xml_add(node, XML_RSC_OP_T_QUEUE, s);
free(s);
}
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
static int
promotion_score(pcmk__output_t *out, va_list args)
{
pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
pe_node_t *chosen = va_arg(args, pe_node_t *);
const char *score = va_arg(args, const char *);
out->list_item(out, NULL, "%s promotion score on %s: %s",
child_rsc->id,
chosen? chosen->details->uname : "none",
score);
return pcmk_rc_ok;
}
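
/* Emit a promotion score as a <promotion_score> XML element, adding the node
 * name when a node has been chosen */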
PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
static int
promotion_score_xml(pcmk__output_t *out, va_list args)
{
pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
pe_node_t *chosen = va_arg(args, pe_node_t *);
const char *score = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
"id", child_rsc->id,
"score", score,
NULL);
if (chosen) {
crm_xml_add(node, "node", chosen->details->uname);
}
return pcmk_rc_ok;
}
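
/* Dump a resource's configuration XML; the boolean selects which copy of the
 * definition formatted_xml_buf() returns */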
PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
static int
resource_config(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
out->output_xml(out, "xml", rsc_xml);
free(rsc_xml);
return pcmk_rc_ok;
}
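
/* Text variant of the above, preceded by a "Resource XML:" heading */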
PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
static int
resource_config_text(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
pcmk__formatted_printf(out, "Resource XML:\n");
out->output_xml(out, "xml", rsc_xml);
free(rsc_xml);
return pcmk_rc_ok;
}
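
/* Print a resource's history summary, either as a list header or as an
 * ordinary list item */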
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
static int
resource_history_text(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t);
bool as_header = va_arg(args, int);
char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
if (as_header) {
out->begin_list(out, NULL, NULL, "%s", buf);
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
return pcmk_rc_ok;
}
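
/* Emit a <resource_history> XML element with orphan status, migration
 * threshold, and any fail count or last failure time */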
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
static int
resource_history_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t);
bool as_header = va_arg(args, int);
xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history",
"id", rsc_id,
NULL);
if (rsc == NULL) {
pcmk__xe_set_bool_attr(node, "orphan", true);
} else if (all || failcount || last_failure > 0) {
char *migration_s = pcmk__itoa(rsc->migration_threshold);
pcmk__xe_set_props(node, "orphan", "false",
"migration-threshold", migration_s,
NULL);
free(migration_s);
if (failcount > 0) {
char *s = pcmk__itoa(failcount);
crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s);
free(s);
}
if (last_failure > 0) {
crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, pcmk__epoch2str(&last_failure));
}
}
if (!as_header) {
pcmk__output_xml_pop_parent(out);
}
return pcmk_rc_ok;
}
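
/* Open the resource-list section with a heading that matches the requested
 * show options */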
static void
print_resource_header(pcmk__output_t *out, uint32_t show_opts)
{
if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
/* Active resources have already been printed by node */
out->begin_list(out, NULL, NULL, "Inactive Resources");
} else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
out->begin_list(out, NULL, NULL, "Full List of Resources");
} else {
out->begin_list(out, NULL, NULL, "Active Resources");
}
}
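
/* Print the resource list (or an appropriate "No ... resources" message),
 * honoring the show options and the node/resource filters */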
PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
"GList *", "GList *", "bool")
static int
resource_list(pcmk__output_t *out, va_list args)
{
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_summary = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
bool print_spacer = va_arg(args, int);
GList *rsc_iter;
int rc = pcmk_rc_no_output;
bool printed_header = false;
/* If we already showed active resources by node, and
* we're not showing inactive resources, we have nothing to do
*/
if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node) &&
!pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
return rc;
}
/* If we haven't already printed resources grouped by node, and brief
 * output was requested, print a brief summary of the resources first */
if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
printed_header = true;
rc = pe__rscs_brief_output(out, rscs, show_opts);
g_list_free(rscs);
}
/* For each resource, display it if appropriate */
for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (resources removed from the configuration but
 * still present in the status history) */
if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
} else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
if (is_active) {
continue;
}
/* Skip primitives already counted in a brief summary */
} else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
continue;
/* Skip resources that aren't at least partially active,
* unless we're displaying inactive resources
*/
} else if (!partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
continue;
} else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) {
continue;
}
if (!printed_header) {
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
printed_header = true;
}
/* Print this resource */
x = out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc,
only_node, only_rsc);
if (x == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
if (print_summary && rc != pcmk_rc_ok) {
if (!printed_header) {
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
printed_header = true;
}
if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
out->list_item(out, NULL, "No inactive resources");
} else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
out->list_item(out, NULL, "No resources");
} else {
out->list_item(out, NULL, "No active resources");
}
}
if (printed_header) {
out->end_list(out);
}
return rc;
}
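
/* Print a resource's operation history on one node, preceded by a
 * resource-history heading */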
PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
"pe_node_t *", "GList *", "uint32_t")
static int
resource_operation_list(pcmk__output_t *out, va_list args)
{
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
GList *op_list = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
GList *gIter = NULL;
int rc = pcmk_rc_no_output;
/* Print each operation */
for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *xml_op = (xmlNode *) gIter->data;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(xml_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
int op_rc_i;
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Before the first operation, print the resource's history summary as a
 * heading */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
NULL, data_set);
out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
failcount, last_failure, true);
rc = pcmk_rc_ok;
}
/* Print the operation */
out->message(out, "op-history", xml_op, task, interval_ms_s,
op_rc_i, show_opts);
}
/* Free the list we created (no need to free the individual items) */
g_list_free(op_list);
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
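
/* Print a resource's utilization values on a node as a single text list item */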
PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
static int
resource_util(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
const char *fn = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
fn, rsc->id, pe__node_name(node));
g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
out->list_item(out, NULL, "%s", dump_text);
free(dump_text);
return pcmk_rc_ok;
}
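
/* Emit a resource's utilization values on a node as a <utilization> XML
 * element */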
PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
static int
resource_util_xml(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
const char *fn = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
"resource", rsc->id,
"node", node->details->uname,
"function", fn,
NULL);
g_hash_table_foreach(rsc->utilization, add_dump_node, xml_node);
return pcmk_rc_ok;
}
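
/* Print one ticket's status (granted or revoked, standby, last-granted time)
 * as an HTML list item */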
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
static int
ticket_html(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk__format_named_time("last-granted",
ticket->last_granted);
out->list_item(out, NULL, "%s:\t%s%s %s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
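
/* Print one ticket's status as a plain-text list item */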
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
static int
ticket_text(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk__format_named_time("last-granted",
ticket->last_granted);
out->list_item(out, ticket->id, "%s%s %s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, ticket->id, "%s%s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
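
/* Emit one ticket as a <ticket> XML element */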
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
static int
ticket_xml(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
xmlNodePtr node = NULL;
node = pcmk__output_create_xml_node(out, "ticket",
"id", ticket->id,
"status", ticket->granted ? "granted" : "revoked",
"standby", pcmk__btoa(ticket->standby),
NULL);
if (ticket->last_granted > -1) {
crm_xml_add(node, "last-granted", pcmk__epoch2str(&ticket->last_granted));
}
return pcmk_rc_ok;
}
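
/* Print the "Tickets" section, with one entry per ticket in the working set */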
PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
static int
ticket_list(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
bool print_spacer = va_arg(args, int);
GHashTableIter iter;
gpointer key, value;
if (g_hash_table_size(data_set->tickets) == 0) {
return pcmk_rc_no_output;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
/* Print section heading */
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
g_hash_table_iter_init(&iter, data_set->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
pe_ticket_t *ticket = (pe_ticket_t *) value;
out->message(out, "ticket", ticket);
}
/* Close section */
out->end_list(out);
return pcmk_rc_ok;
}
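
/* Table mapping message names and output formats to the formatter functions
 * above */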
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "default", ban_text },
{ "ban", "html", ban_html },
{ "ban", "xml", ban_xml },
{ "ban-list", "default", ban_list },
{ "bundle", "default", pe__bundle_text },
{ "bundle", "xml", pe__bundle_xml },
{ "bundle", "html", pe__bundle_html },
{ "clone", "default", pe__clone_default },
{ "clone", "xml", pe__clone_xml },
{ "cluster-counts", "default", cluster_counts_text },
{ "cluster-counts", "html", cluster_counts_html },
{ "cluster-counts", "xml", cluster_counts_xml },
{ "cluster-dc", "default", cluster_dc_text },
{ "cluster-dc", "html", cluster_dc_html },
{ "cluster-dc", "xml", cluster_dc_xml },
{ "cluster-options", "default", cluster_options_text },
{ "cluster-options", "html", cluster_options_html },
{ "cluster-options", "log", cluster_options_log },
{ "cluster-options", "xml", cluster_options_xml },
{ "cluster-summary", "default", cluster_summary },
{ "cluster-summary", "html", cluster_summary_html },
{ "cluster-stack", "default", cluster_stack_text },
{ "cluster-stack", "html", cluster_stack_html },
{ "cluster-stack", "xml", cluster_stack_xml },
{ "cluster-times", "default", cluster_times_text },
{ "cluster-times", "html", cluster_times_html },
{ "cluster-times", "xml", cluster_times_xml },
{ "failed-action", "default", failed_action_default },
{ "failed-action", "xml", failed_action_xml },
{ "failed-action-list", "default", failed_action_list },
{ "group", "default", pe__group_default },
{ "group", "xml", pe__group_xml },
{ "maint-mode", "text", cluster_maint_mode_text },
{ "node", "default", node_text },
{ "node", "html", node_html },
{ "node", "xml", node_xml },
{ "node-and-op", "default", node_and_op },
{ "node-and-op", "xml", node_and_op_xml },
{ "node-capacity", "default", node_capacity },
{ "node-capacity", "xml", node_capacity_xml },
{ "node-history-list", "default", node_history_list },
{ "node-list", "default", node_list_text },
{ "node-list", "html", node_list_html },
{ "node-list", "xml", node_list_xml },
{ "node-weight", "default", node_weight },
{ "node-weight", "xml", node_weight_xml },
{ "node-attribute", "default", node_attribute_text },
{ "node-attribute", "html", node_attribute_html },
{ "node-attribute", "xml", node_attribute_xml },
{ "node-attribute-list", "default", node_attribute_list },
{ "node-summary", "default", node_summary },
{ "op-history", "default", op_history_text },
{ "op-history", "xml", op_history_xml },
{ "primitive", "default", pe__resource_text },
{ "primitive", "xml", pe__resource_xml },
{ "primitive", "html", pe__resource_html },
{ "promotion-score", "default", promotion_score },
{ "promotion-score", "xml", promotion_score_xml },
{ "resource-config", "default", resource_config },
{ "resource-config", "text", resource_config_text },
{ "resource-history", "default", resource_history_text },
{ "resource-history", "xml", resource_history_xml },
{ "resource-list", "default", resource_list },
{ "resource-operation-list", "default", resource_operation_list },
{ "resource-util", "default", resource_util },
{ "resource-util", "xml", resource_util_xml },
{ "ticket", "default", ticket_text },
{ "ticket", "html", ticket_html },
{ "ticket", "xml", ticket_xml },
{ "ticket-list", "default", ticket_list },
{ NULL, NULL, NULL }
};
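
/* Register all of the above formatters with the given output object */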
void
pe__register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}