diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 3aab519c43..efaf7a59e3 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,3918 +1,3986 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster02
* ping (ocf:pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster01
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
* Replica[1]
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster02
* httpd (ocf:heartbeat:apache): Started httpd-bundle-1
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
* httpd-bundle-1 (ocf:pacemaker:remote): Started cluster02
* Replica[2]
* httpd-bundle-ip-192.168.122.133 (ocf:heartbeat:IPaddr2): Stopped
* httpd (ocf:heartbeat:apache): Stopped
* httpd-bundle-docker-2 (ocf:heartbeat:docker): Stopped
* httpd-bundle-2 (ocf:pacemaker:remote): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
- * Resource Group: mysql-group:2:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:3:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:4:
- * mysql-proxy (lsb:mysql-proxy): Stopped
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
* promotable-rsc (ocf:pacemaker:Stateful): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* 1 (ocf:pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf:heartbeat:IPaddr): Active cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf:pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
* mysql-proxy (lsb:mysql-proxy): Started
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started
* httpd-bundle-docker-0 (ocf:heartbeat:docker): Started
* Node cluster02: online:
* Resources:
* ping (ocf:pacemaker:ping): Started
* dummy (ocf:pacemaker:Dummy): Started
* Public-IP (ocf:heartbeat:IPaddr): Started
* Email (lsb:exim): Started
* mysql-proxy (lsb:mysql-proxy): Started
* promotable-rsc (ocf:pacemaker:Stateful): Promoted
* httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started
* GuestNode httpd-bundle-0@cluster01: online:
* Resources:
* httpd (ocf:heartbeat:apache): Started
* GuestNode httpd-bundle-1@cluster02: online:
* Resources:
* httpd (ocf:heartbeat:apache): Started
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (lsb:mysql-proxy): Active
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:Stateful): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
* 1 (lsb:mysql-proxy): Active
* 1 (ocf:heartbeat:IPaddr): Active
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:Dummy): Active
* 1 (ocf:pacemaker:Stateful): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster01: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
* GuestNode httpd-bundle-1@cluster02: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster01:
* httpd: migration-threshold=1000000:
* (1) start
* Node: httpd-bundle-1@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Unpromoted: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (2) start
* (4) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* promotable-rsc: migration-threshold=1000000:
* (4) monitor: interval="10000ms"
* (5) cancel: interval="10000ms"
* (6) promote
* (7) monitor: interval="5000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
Negative Location Constraints:
* not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by resource tag
=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Fencing (stonith:fence_xvm): Started cluster01
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by primitive resource
=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by primitive resource
=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource
=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* Public-IP: migration-threshold=1000000:
* (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by group resource member
=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource
=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource
=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by clone resource instance
=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by clone resource instance
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf:pacemaker:ping): Started cluster02
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by exact clone resource instance
=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by resource that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by inactive bundle resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled IP address resource
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[1]
* httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled container
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundle connection
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Full List of Resources:
* Container bundle set: httpd-bundle [pcmk:http]:
* Replica[0]
* httpd (ocf:heartbeat:apache): Started httpd-bundle-0
* Replica[1]
* httpd (ocf:heartbeat:apache): Started httpd-bundle-1
* Replica[2]
* httpd (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by bundled primitive resource
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
- * Resource Group: mysql-group:2:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:3:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:4:
- * mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by clone name in cloned group
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
- * Resource Group: mysql-group:2:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:3:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:4:
- * mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by group name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:0:
* mysql-proxy (lsb:mysql-proxy): Started cluster02
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
- * Resource Group: mysql-group:2:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:3:
- * mysql-proxy (lsb:mysql-proxy): Stopped
- * Resource Group: mysql-group:4:
- * mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by primitive name in cloned group
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: mysql-clone-group [mysql-group]:
* Resource Group: mysql-group:1:
* mysql-proxy (lsb:mysql-proxy): Started cluster01
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* mysql-proxy: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Text output of partially active resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01
- * Resource Group: partially-active-group:
+ * Resource Group: partially-active-group (1 member inactive):
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
- * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources
=#=#=#= Begin test: XML output of partially active resources =#=#=#=
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* dummy-1 (ocf:pacemaker:Dummy): Started cluster02
* dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* 1/1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf:pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
+=#=#=#= Begin test: Text output of partially active group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 13 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Active Resources:
+ * Resource Group: partially-active-group (1 member inactive):
+ * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
+=#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of partially active group
+=#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 13 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Full List of Resources:
+ * Resource Group: partially-active-group:
+ * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
+ * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
+=#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of partially active group, with inactive resources
+=#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 13 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Active Resources:
+ * Resource Group: partially-active-group (1 member inactive):
+ * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
+=#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of active member of partially active group
+=#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 13 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Active Resources:
+ * Resource Group: partially-active-group (1 member inactive):
+ * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
+=#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of inactive member of partially active group
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:ping): Active
* 1 (ocf:pacemaker:remote): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (ocf:heartbeat:IPaddr2): Active
* 1 (ocf:heartbeat:docker): Active
* 1 (ocf:pacemaker:Dummy): Active
* 1 (ocf:pacemaker:remote): Active
* GuestNode httpd-bundle-0@cluster02: online:
* Resources:
* 1 (ocf:heartbeat:apache): Active
Inactive Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01
* Resource Group: partially-active-group:
* 1/2 (ocf:pacemaker:Dummy): Active cluster02
Node Attributes:
* Node: cluster01:
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-0: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* dummy-1: migration-threshold=1000000:
* (2) start
* Node: cluster01:
* Fencing: migration-threshold=1000000:
* (15) start
* (20) monitor: interval="60000ms"
* ping: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="10000ms"
* httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-docker-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="60000ms"
* httpd-bundle-1: migration-threshold=1000000:
* (2) start
* (3) monitor: interval="30000ms"
* Node: httpd-bundle-0@cluster02:
* httpd: migration-threshold=1000000:
* (1) start
=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 13 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#=
=#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, filtered by node
=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
*** Resource management is DISABLED ***
The cluster will not attempt to start, stop or recover services
Node List:
* GuestNode httpd-bundle-0@cluster01: maintenance
* GuestNode httpd-bundle-1@cluster02: maintenance
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping] (unmanaged):
* ping (ocf:pacemaker:ping): Started cluster02 (unmanaged)
* ping (ocf:pacemaker:ping): Started cluster01 (unmanaged)
* Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
* dummy (ocf:pacemaker:Dummy): Started cluster02 (unmanaged)
- * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled):
+ * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged, disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
- * Resource Group: inactive-group (unmanaged) (disabled):
+ * Resource Group: inactive-group (unmanaged, disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
* Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (unmanaged)
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (unmanaged)
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (unmanaged)
* Resource Group: exim-group (unmanaged):
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (unmanaged)
* Email (lsb:exim): Started cluster02 (unmanaged)
* Clone Set: mysql-clone-group [mysql-group] (unmanaged):
* Resource Group: mysql-group:0 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
* Resource Group: mysql-group:1 (unmanaged):
* mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
- * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged):
+ * Clone Set: promotable-clone [promotable-rsc] (promotable, unmanaged):
* promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (unmanaged)
* promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (unmanaged)
=#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
* Passed: crm_mon - Text output of all resources with maintenance-mode enabled
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index ba0da8757a..999b212149 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,1883 +1,1899 @@
#!@BASH_PATH@
#
# Copyright 2008-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
# Set the exit status of a command to the exit code of the last program to
# exit non-zero. This is bash-specific.
set -o pipefail
#
# Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
# compatible functionality. Do not use the -i option, alternation (\|),
# \0, or character sequences such as \n or \s.
#
USAGE_TEXT="Usage: cts-cli []
Options:
--help Display this text, then exit
-V, --verbose Display any differences from expected output
-t 'TEST [...]' Run only specified tests (default: 'dates tools crm_mon acls validity upgrade rules')
-p DIR Look for executables in DIR (may be specified multiple times)
-v, --valgrind Run all commands under valgrind
-s Save actual output as expected output"
# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
test_home="$(dirname "$(readlink -e "$0")")"
else
test_home="$(dirname "$0")"
fi
: ${shadow=cts-cli}
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
num_errors=0
num_passed=0
verbose=0
tests="dates tools crm_mon acls validity upgrade rules"
do_save=0
XMLLINT_CMD=
VALGRIND_CMD=
VALGRIND_OPTS="
-q
--gen-suppressions=all
--show-reachable=no
--leak-check=full
--trace-children=no
--time-stamp=yes
--num-callers=20
--suppressions=$test_home/valgrind-pcmk.suppressions
"
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_USAGE=64
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111
reset_shadow_cib_version() {
local SHADOWPATH
SHADOWPATH="$(crm_shadow --file)"
# sed -i isn't portable :-(
cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # preserve permissions
sed -e 's/epoch="[0-9]*"/epoch="1"/g' \
-e 's/num_updates="[0-9]*"/num_updates="0"/g' \
-e 's/admin_epoch="[0-9]*"/admin_epoch="0"/g' \
"$SHADOWPATH" > "${SHADOWPATH}.$$"
mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
}
# A newly created empty CIB might or might not have a rsc_defaults section
# depending on whether the --with-resource-stickiness-default configure
# option was used. To ensure regression tests behave the same either way,
# delete any rsc_defaults after creating or erasing a CIB.
delete_shadow_resource_defaults() {
cibadmin --delete --xml-text ''
# The above command might or might not bump the CIB version, so reset it
# to ensure future changes result in the same version for comparison.
reset_shadow_cib_version
}
create_shadow_cib() {
local VALIDATE_WITH
local SHADOW_CMD
VALIDATE_WITH="$1"
export CIB_shadow_dir="${shadow_dir}"
SHADOW_CMD="$VALGRIND_CMD crm_shadow --batch --force --create-empty"
if [ -z "$VALIDATE_WITH" ]; then
$SHADOW_CMD "$shadow" 2>&1
else
$SHADOW_CMD "$shadow" --validate-with="${VALIDATE_WITH}" 2>&1
fi
export CIB_shadow="$shadow"
delete_shadow_resource_defaults
}
function _test_assert() {
target=$1; shift
validate=$1; shift
cib=$1; shift
app=`echo "$cmd" | sed 's/\ .*//'`
printf "* Running: $app - $desc\n" 1>&2
printf "=#=#=#= Begin test: $desc =#=#=#=\n"
export outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.output.XXXXXXXXXX)
eval $VALGRIND_CMD $cmd 2>&1 | tee $outfile
rc=$?
if [ x$cib != x0 ]; then
printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
CIB_user=root cibadmin -Q
fi
# Do not validate if running under valgrind, even if told to do so. Valgrind
# will output a lot more stuff that is not XML, so it wouldn't validate anyway.
if [ "$validate" = "1" ] && [ "$VALGRIND_CMD" = "" ] && [ $rc = 0 ] && [ "$XMLLINT_CMD" != "" ]; then
# The sed command filters out the "- validates" line that xmllint will output
# on success. grep cannot be used here because "grep -v 'validates$'" will
# return an exit code of 1 if its input consists entirely of "- validates".
$XMLLINT_CMD --noout --relaxng "$PCMK_schema_directory/api/api-result.rng" "$outfile" 2>&1 | sed -n '/validates$/ !p'
rc=$?
if [ $rc = 0 ]; then
printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
else
printf "=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=\n" "$desc" $rc
fi
else
printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
fi
rm -f "$outfile"
if [ $rc -ne $target ]; then
num_errors=$(( $num_errors + 1 ))
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
return
exit $CRM_EX_ERROR
else
printf "* Passed: %-14s - %s\n" $app "$desc"
num_passed=$(( $num_passed + 1 ))
fi
}
function test_assert() {
_test_assert $1 0 $2
}
function test_assert_validate() {
_test_assert $1 1 $2
}
function test_crm_mon() {
local TMPXML
export CIB_file="$test_home/cli/crm_mon.xml"
desc="Basic text output"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output"
cmd="crm_mon --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output without node section"
cmd="crm_mon -1 --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="XML output without the node section"
cmd="crm_mon --output-as=xml --exclude=nodes"
test_assert_validate $CRM_EX_OK 0
desc="Text output with only the node section"
cmd="crm_mon -1 --exclude=all --include=nodes"
test_assert $CRM_EX_OK 0
# The above test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
desc="Complete text output"
cmd="crm_mon -1 --include=all"
test_assert $CRM_EX_OK 0
# XML includes everything already so there's no need for a complete test
desc="Complete text output with detail"
cmd="crm_mon -1R --include=all"
test_assert $CRM_EX_OK 0
# XML includes detailed output already
desc="Complete brief text output"
cmd="crm_mon -1 --include=all --brief"
test_assert $CRM_EX_OK 0
desc="Complete text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="XML output grouped by node"
cmd="crm_mon -1 --output-as=xml --group-by-node"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by node"
cmd="crm_mon -1 --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node"
cmd="crm_mon --output-as xml --include=all --node=cluster01"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by tag"
cmd="crm_mon -1 --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="XML output filtered by tag"
cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by resource tag"
cmd="crm_mon -1 --include=all --resource=fencing-rscs"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource tag"
cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output filtered by node that doesn't exist"
cmd="crm_mon -1 --node=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node that doesn't exist"
cmd="crm_mon --output-as=xml --node=blah"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Basic text output with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster02"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete text output filtered by primitive resource"
cmd="crm_mon -1 --include=all --resource=Fencing"
test_assert $CRM_EX_OK 0
desc="XML output filtered by primitive resource"
cmd="crm_mon --output-as=xml --resource=Fencing"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by group resource"
cmd="crm_mon -1 --include=all --resource=exim-group"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource"
cmd="crm_mon --output-as=xml --resource=exim-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by group resource member"
cmd="crm_mon -1 --include=all --resource=Public-IP"
test_assert $CRM_EX_OK 0
desc="XML output filtered by group resource member"
cmd="crm_mon --output-as=xml --resource=Email"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by clone resource"
cmd="crm_mon -1 --include=all --resource=ping-clone"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource"
cmd="crm_mon --output-as=xml --resource=ping-clone"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by clone resource instance"
cmd="crm_mon -1 --include=all --resource=ping"
test_assert $CRM_EX_OK 0
desc="XML output filtered by clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output filtered by exact clone resource instance"
cmd="crm_mon -1 --include=all --show-detail --resource=ping:0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by exact clone resource instance"
cmd="crm_mon --output-as=xml --resource=ping:1"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output filtered by resource that doesn't exist"
cmd="crm_mon -1 --resource=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by resource that doesn't exist"
cmd="crm_mon --output-as=xml --resource=blah"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by tag"
cmd="crm_mon -1 -r --resource=inactive-rscs"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle resource"
cmd="crm_mon -1 -r --resource=httpd-bundle"
test_assert $CRM_EX_OK 0
desc="XML output filtered by inactive bundle resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled IP address resource"
cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled IP address resource"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled container"
cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled container"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundle connection"
cmd="crm_mon -1 -r --resource=httpd-bundle-0"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundle connection"
cmd="crm_mon --output-as=xml --resource=httpd-bundle-0"
test_assert_validate $CRM_EX_OK 0
desc="Basic text output with inactive resources, filtered by bundled primitive resource"
cmd="crm_mon -1 -r --resource=httpd"
test_assert $CRM_EX_OK 0
desc="XML output filtered by bundled primitive resource"
cmd="crm_mon --output-as=xml --resource=httpd"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by clone name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by clone name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-clone-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by group name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by group name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by exact group instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact group instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-group:1"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by primitive name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by primitive name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy"
test_assert_validate $CRM_EX_OK 0
desc="Complete text output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"
test_assert $CRM_EX_OK 0
desc="XML output, filtered by exact primitive instance name in cloned group"
cmd="crm_mon --output-as=xml --resource=mysql-proxy:1"
test_assert_validate $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-partial.xml"
desc="Text output of partially active resources"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output of partially active resources"
cmd="crm_mon -1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Complete brief text output, with inactive resources"
cmd="crm_mon -1 -r --include=all --brief"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
+ desc="Text output of partially active group"
+ cmd="crm_mon -1 --resource=partially-active-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Text output of partially active group, with inactive resources"
+ cmd="crm_mon -1 --resource=partially-active-group -r"
+ test_assert $CRM_EX_OK 0
+
+ desc="Text output of active member of partially active group"
+ cmd="crm_mon -1 --resource=dummy-1"
+ test_assert $CRM_EX_OK 0
+
+ desc="Text output of inactive member of partially active group"
+ cmd="crm_mon -1 --resource=dummy-2"
+ test_assert $CRM_EX_OK 0
+
desc="Complete brief text output grouped by node, with inactive resources"
cmd="crm_mon -1 -r --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, filtered by node"
cmd="crm_mon -1 --output-as=xml --node=cluster01"
test_assert_validate $CRM_EX_OK 0
unset CIB_file
export CIB_file=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_mon.xml.XXXXXXXXXX)
sed -e '/maintenance-mode/ s/false/true/' "$test_home/cli/crm_mon.xml" > $CIB_file
desc="Text output of all resources with maintenance-mode enabled"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
rm -r "$CIB_file"
unset CIB_file
}
function test_tools() {
local TMPXML
local TMPORIG
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX)
create_shadow_cib
desc="Validate CIB"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK
desc="Configure something before erasing"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Require --force for CIB erasure"
cmd="cibadmin -E"
test_assert $CRM_EX_UNSAFE
desc="Allow CIB erasure with --force"
cmd="cibadmin -E --force"
test_assert $CRM_EX_OK 0
# Skip outputting the resulting CIB in the previous command, and delete
# rsc_defaults now, so tests behave the same regardless of build options.
delete_shadow_resource_defaults
# Verify the output after erasure
desc="Query CIB"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK
# Save a copy of the CIB for a later test
cibadmin -Q > "$TMPORIG"
desc="Set cluster option"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Query new cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Query cluster options"
cmd="cibadmin -Q -o crm_config > $TMPXML"
test_assert $CRM_EX_OK
desc="Set no-quorum policy"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="Delete nvpair"
cmd="cibadmin -D -o crm_config --xml-text ''"
test_assert $CRM_EX_OK
desc="Create operation should fail"
cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_EXISTS
desc="Modify cluster options section"
cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Query updated cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Set duplicate cluster option"
cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
test_assert $CRM_EX_OK
desc="Setting multiply defined cluster option should fail"
cmd="crm_attribute -n cluster-delay -v 30s"
test_assert $CRM_EX_MULTIPLE
desc="Set cluster option with -s"
cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
test_assert $CRM_EX_OK
desc="Delete cluster option with -i"
cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Create node1 and bring it online"
cmd="crm_simulate --live-check --in-place --node-up=node1"
test_assert $CRM_EX_OK
desc="Create node attribute"
cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
test_assert $CRM_EX_OK
desc="Query new node attribute"
cmd="cibadmin -Q -o nodes | grep node1-ram"
test_assert $CRM_EX_OK
desc="Set a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query a fail count"
cmd="crm_failcount --query -r foo -N node1"
test_assert $CRM_EX_OK
desc="Delete a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -D -N node1 -t status"
test_assert $CRM_EX_OK
desc="Digest calculation"
cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
test_assert $CRM_EX_OK
# This update will fail because it has version numbers
desc="Replace operation should fail"
cmd="cibadmin -R --xml-file $TMPORIG"
test_assert $CRM_EX_OLD
desc="Default standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Set standby status"
cmd="crm_standby -N node1 -v true"
test_assert $CRM_EX_OK
desc="Query standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Delete standby value"
cmd="crm_standby -N node1 -D"
test_assert $CRM_EX_OK
desc="Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g is-managed"
test_assert $CRM_EX_OK
desc="Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create another resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Show why a resource is not running"
cmd="crm_resource -Y -r dummy"
test_assert $CRM_EX_OK 0
desc="Remove another resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Create a resource attribute"
cmd="crm_resource -r dummy -p delay -v 10s"
test_assert $CRM_EX_OK
desc="List the configured resources"
cmd="crm_resource -L"
test_assert $CRM_EX_OK
desc="List the configured resources in XML"
cmd="crm_resource -L --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List IDs of instantiated resources"
cmd="crm_resource -l"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource"
cmd="crm_resource -q -r dummy"
test_assert $CRM_EX_OK 0
desc="Require a destination when migrating a resource that is stopped"
cmd="crm_resource -r dummy -M"
test_assert $CRM_EX_USAGE
desc="Don't support migration to non-existent locations"
cmd="crm_resource -r dummy -M -N i.do.not.exist"
test_assert $CRM_EX_NOSUCH
desc="Create a fencing resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Bring resources online"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Try to move a resource to its existing location"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_EXISTS
desc="Move a resource from its existing location"
cmd="crm_resource -r dummy --move"
test_assert $CRM_EX_OK
desc="Clear out constraints generated by --move"
cmd="crm_resource -r dummy --clear"
test_assert $CRM_EX_OK
desc="Default ticket granted state"
cmd="crm_ticket -t ticketA -G granted -d false"
test_assert $CRM_EX_OK
desc="Set ticket granted state"
cmd="crm_ticket -t ticketA -r --force"
test_assert $CRM_EX_OK
desc="Query ticket granted state"
cmd="crm_ticket -t ticketA -G granted"
test_assert $CRM_EX_OK
desc="Delete ticket granted state"
cmd="crm_ticket -t ticketA -D granted --force"
test_assert $CRM_EX_OK
desc="Make a ticket standby"
cmd="crm_ticket -t ticketA -s"
test_assert $CRM_EX_OK
desc="Query ticket standby state"
cmd="crm_ticket -t ticketA -G standby"
test_assert $CRM_EX_OK
desc="Activate a ticket"
cmd="crm_ticket -t ticketA -a"
test_assert $CRM_EX_OK
desc="Delete ticket standby state"
cmd="crm_ticket -t ticketA -D standby"
test_assert $CRM_EX_OK
desc="Ban a resource on unknown node"
cmd="crm_resource -r dummy -B -N host1"
test_assert $CRM_EX_NOSUCH
desc="Create two more nodes and bring them online"
cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
test_assert $CRM_EX_OK
desc="Ban dummy from node1"
cmd="crm_resource -r dummy -B -N node1"
test_assert $CRM_EX_OK
desc="Show where a resource is running"
cmd="crm_resource -r dummy -W"
test_assert $CRM_EX_OK 0
desc="Show constraints on a resource"
cmd="crm_resource -a -r dummy"
test_assert $CRM_EX_OK 0
desc="Ban dummy from node2"
cmd="crm_resource -r dummy -B -N node2 --output-as=xml"
test_assert_validate $CRM_EX_OK
desc="Relocate resources due to ban"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Move dummy to node1"
cmd="crm_resource -r dummy -M -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK
desc="Clear implicit constraints for dummy on node2"
cmd="crm_resource -r dummy -U -N node2"
test_assert $CRM_EX_OK
desc="Drop the status section"
cmd="cibadmin -R -o status --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a clone"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a resource meta attribute"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates (force clone)"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Update child resource meta attribute with duplicates"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute in parent"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update existing resource meta attribute"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the parent"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Copy resources"
cmd="cibadmin -Q -o resources > $TMPXML"
test_assert $CRM_EX_OK 0
desc="Delete resource parent meta attribute (force)"
cmd="crm_resource -r test-clone --meta -d is-managed --force"
test_assert $CRM_EX_OK
desc="Restore duplicates"
cmd="cibadmin -R -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Delete resource child meta attribute"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
cibadmin -C -o resources --xml-text ' \
\
\
'
desc="Create a resource meta attribute in dummy1"
cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in dummy-group"
cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
test_assert $CRM_EX_OK
cibadmin -D -o resource --xml-text ''
desc="Specify a lifetime when moving a resource"
cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
test_assert $CRM_EX_OK
desc="Try to move a resource previously moved with a lifetime"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_OK
desc="Ban dummy from node1 for a short time"
cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
test_assert $CRM_EX_OK
desc="Remove expired constraints"
sleep 2
cmd="crm_resource --clear --expired"
test_assert $CRM_EX_OK
# Clear has already been tested elsewhere, but we need to get rid of the
# constraints so testing delete works. It won't delete if there's still
# a reference to the resource somewhere.
desc="Clear all implicit constraints for dummy"
cmd="crm_resource -r dummy -U"
test_assert $CRM_EX_OK
desc="Delete a resource"
cmd="crm_resource -D -r dummy -t primitive"
test_assert $CRM_EX_OK
unset CIB_shadow
unset CIB_shadow_dir
rm -f "$TMPXML" "$TMPORIG"
desc="Create an XML patchset"
cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
test_assert $CRM_EX_ERROR 0
export CIB_file="$test_home/cli/constraints.xml"
for rsc in prim1 prim2 prim3 prim4 prim5 prim6 prim7 prim8 prim9 \
prim10 prim11 prim12 prim13 group clone; do
desc="Check locations and constraints for $rsc"
cmd="crm_resource -a -r $rsc"
test_assert $CRM_EX_OK 0
desc="Recursively check locations and constraints for $rsc"
cmd="crm_resource -A -r $rsc"
test_assert $CRM_EX_OK 0
desc="Check locations and constraints for $rsc in XML"
cmd="crm_resource -a -r $rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Recursively check locations and constraints for $rsc in XML"
cmd="crm_resource -A -r $rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
done
unset CIB_file
export CIB_file="$test_home/cli/crm_resource_digests.xml"
desc="Show resource digests"
cmd="crm_resource --digests -r rsc1 -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Show resource digests with overrides"
cmd="$cmd CRM_meta_interval=10000 CRM_meta_timeout=20000"
test_assert $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crmadmin-cluster-remote-guest-nodes.xml"
desc="List all nodes"
cmd="crmadmin -N | wc -l | grep 11"
test_assert $CRM_EX_OK 0
desc="List cluster nodes"
cmd="crmadmin -N cluster | wc -l | grep 6"
test_assert $CRM_EX_OK 0
desc="List guest nodes"
cmd="crmadmin -N guest | wc -l | grep 2"
test_assert $CRM_EX_OK 0
desc="List remote nodes"
cmd="crmadmin -N remote | wc -l | grep 3"
test_assert $CRM_EX_OK 0
desc="List cluster,remote nodes"
cmd="crmadmin -N cluster,remote | wc -l | grep 9"
test_assert $CRM_EX_OK 0
desc="List guest,remote nodes"
cmd="crmadmin -N guest,remote | wc -l | grep 5"
test_assert $CRM_EX_OK 0
unset CIB_file
export CIB_file="$test_home/cli/crm_mon.xml"
desc="List a promotable clone resource"
cmd="crm_resource --locate -r promotable-clone"
test_assert $CRM_EX_OK 0
desc="List the primitive of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc"
test_assert $CRM_EX_OK 0
desc="List a single instance of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc:0"
test_assert $CRM_EX_OK 0
desc="List another instance of a promotable clone resource"
cmd="crm_resource --locate -r promotable-rsc:1"
test_assert $CRM_EX_OK 0
desc="List a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-clone --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List the primitive of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List a single instance of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc:0 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="List another instance of a promotable clone resource in XML"
cmd="crm_resource --locate -r promotable-rsc:1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
unset CIB_file
export CIB_file="-"
desc="Check that CIB_file=\"-\" works - crm_mon"
cmd="cat $test_home/cli/crm_mon.xml | crm_mon -1"
test_assert $CRM_EX_OK 0
desc="Check that CIB_file=\"-\" works - crm_resource"
cmd="cat $test_home/cli/crm_resource_digests.xml | crm_resource --digests -r rsc1 -N node1 --output-as=xml"
test_assert_validate $CRM_EX_OK 0
desc="Check that CIB_file=\"-\" works - crmadmin"
cmd="cat $test_home/cli/crmadmin-cluster-remote-guest-nodes.xml | crmadmin -N | wc -l | grep 11"
test_assert $CRM_EX_OK 0
unset CIB_file
}
INVALID_PERIODS=(
"2019-01-01 00:00:00Z" # Start with no end
"2019-01-01 00:00:00Z/" # Start with only a trailing slash
"PT2S/P1M" # Two durations
"2019-13-01 00:00:00Z/P1M" # Out-of-range month
"20191077T15/P1M" # Out-of-range day
"2019-10-01T25:00:00Z/P1M" # Out-of-range hour
"2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z" # Out-of-range minute
"2019-10-01 00:00:80Z/P1M" # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour
"20191001T000010 -00:61/P1M" # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P" # Duration with no values
"P1Z/2019-02-20 00:00:00Z" # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z" # No number for duration unit
)
function test_dates() {
# Ensure invalid period specifications are rejected
for spec in '' "${INVALID_PERIODS[@]}"; do
desc="Invalid period - [$spec]"
cmd="iso8601 -p \"$spec\""
test_assert $CRM_EX_INVALID_PARAM 0
done
desc="2014-01-01 00:30:00 - 1 Hour"
cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - Feb 29 in leap year"
cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - using 'T' and offset"
cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
test_assert $CRM_EX_OK 0
desc="24:00:00 equivalent to 00:00:00 of next day"
cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
desc="20$y-W01-7"
cmd="iso8601 -d '20$y-W01-7 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-7 - round-trip"
cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1"
cmd="iso8601 -d '20$y-W01-1 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1 - round-trip"
cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
test_assert $CRM_EX_OK 0
done
desc="2009-W53-07"
cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="epoch + 2 Years 5 Months 6 Minutes"
cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 1 Month"
cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 2 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 3 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-03-31 - 1 Month"
cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2038-01-01 + 3 Months"
cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
}
# Exercise CIB ACL enforcement for a series of users, each configured (by
# test_acls) with a different ACL role.  Every step sets the globals $desc
# and $cmd that test_assert consumes; test_assert runs $cmd and compares
# its exit status against the expected code (a trailing 0 suppresses the
# additional CIB-content comparison).
#
# NOTE(review): the --xml-text arguments below appear as empty strings ('')
# in this copy of the script; presumably the original XML payloads were
# stripped during extraction -- verify against the upstream cts-cli.in.
#
# $1: path to a scratch XML file used for the --replace round-trip tests
function test_acl_loop() {
local TMPXML
TMPXML="$1"
# Make sure we're rejecting things for the right reasons
export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
export PCMK_stderr=1
CIB_user=root cibadmin --replace --xml-text ''
### no ACL ###
# A user with no ACL at all must be denied everything once ACLs are enabled
export CIB_user=unknownguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### deny /cib permission ###
# A user explicitly denied the whole /cib must likewise fail everything
export CIB_user=l33t-haxor
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
# Read access plus limited writes; most modifications must be rejected
export CIB_user=niceguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# root bypasses ACLs entirely; also restores stonith-enabled for later steps
export CIB_user=root
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v true"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
### deny /cib permission ###
# Re-test the denied user against the resource created above
export CIB_user=l33t-haxor
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
# The observer may manage this resource's meta attributes
export CIB_user=niceguy
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Started"
test_assert $CRM_EX_OK
### read //meta_attributes ###
export CIB_user=badidea
desc="$CIB_user: Query configuration - implied deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
### deny /cib, read //meta_attributes ###
export CIB_user=betteridea
desc="$CIB_user: Query configuration - explicit deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
# Snapshot the CIB, strip the ACLs from the copy, and verify a non-root
# user cannot push the ACL-less copy back (which would lift restrictions)
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Replace - remove acls"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create resource"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### admin role ###
# NOTE(review): no "export" here, unlike the other role sections; CIB_user
# was exported earlier in this function, so the plain assignment still
# reaches child processes -- but the inconsistency looks accidental.
CIB_user=bob
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### super_user role ###
export CIB_user=joe
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_writer role ###
export CIB_user=mike
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_denied role ###
export CIB_user=chris
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
}
# Set up a shadow CIB with a set of ACL definitions, then run the ACL test
# sequence (test_acl_loop) twice: once against the original pacemaker-1.3
# schema and once after upgrading the CIB to the latest schema.
#
# NOTE(review): "cat < \"$TMPXML\"" followed by a bare EOF line looks like a
# mangled here-document (originally "cat <<EOF > \"$TMPXML\"" with the ACL
# XML as its body) whose content was stripped during extraction -- confirm
# against the upstream cts-cli.in before relying on this copy.
function test_acls() {
# SHADOWPATH is declared but not referenced in this visible body
local SHADOWPATH
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-1.3
cat < "$TMPXML"
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="New ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Another ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Updated ACL"
cmd="cibadmin --replace -o acls --xml-text ''"
test_assert $CRM_EX_OK
# First pass: original schema
test_acl_loop "$TMPXML"
printf "\n\n    !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
export CIB_user=root
desc="$CIB_user: Upgrade to latest CIB schema"
cmd="cibadmin --upgrade --force -V"
test_assert $CRM_EX_OK
reset_shadow_cib_version
# Second pass: upgraded schema must enforce the same ACL semantics
test_acl_loop "$TMPXML"
unset CIB_shadow_dir
rm -f "$TMPXML"
}
# Verify schema-validation behavior: deliberately corrupt a known-good CIB
# in several ways (enum violations, bogus validate-with, missing
# validate-with) and check that cibadmin rejects invalid configurations and
# that crm_simulate reports the expected exit code for each variant.
#
# NOTE(review): several --xml-text arguments and the sed pattern at
# "sed 's|||'" below are empty in this copy; the original payloads/patterns
# appear to have been stripped during extraction -- verify upstream.
function test_validity() {
local TMPGOOD
local TMPBAD
TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-1.2
# Trace the validation/upgrade code paths so failures are explainable
export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update
export PCMK_stderr=1
cibadmin -C -o resources --xml-text ''
cibadmin -C -o resources --xml-text ''
cibadmin -C -o constraints --xml-text ''
# Snapshot a valid configuration to derive the broken variants from
cibadmin -Q > "$TMPGOOD"
desc="Try to make resulting CIB invalid (enum violation)"
cmd="cibadmin -M -o constraints --xml-text ''"
test_assert $CRM_EX_CONFIG
# "break" is not a valid action name, producing an enum violation
sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (enum violation)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid (unrecognized validate-with)"
cmd="cibadmin -M --xml-text ''"
test_assert $CRM_EX_CONFIG
# A schema version that will never exist
sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
cmd="cibadmin -C -o configuration --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|||' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# Drop the validate-with attribute entirely (content still valid)
sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB valid, although without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with valid CIB, but without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# this will just disable validation and accept the config, outputting
# validation errors
sed -e 's|[ ][ ]*validate-with="[^"]*"||' \
-e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
"$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB invalid, and without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPGOOD" "$TMPBAD"
}
# Verify that a CIB schema upgrade (from pacemaker-2.10 to the latest
# schema, exercising the 2.10 XSL transform) preserves a resource's
# instance attributes: configure a resource, force-upgrade, then query an
# attribute that must survive the transform.
#
# NOTE(review): "cat < \"$TMPXML\"" / EOF below looks like a mangled
# here-document whose XML body (defining the mySmartFuse resource) was
# stripped during extraction -- confirm against the upstream cts-cli.in.
test_upgrade() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
create_shadow_cib pacemaker-2.10
desc="Set stonith-enabled=false"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
cat < "$TMPXML"
EOF
desc="Configure the initial resource"
cmd="cibadmin -M -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
cmd="cibadmin --upgrade --force -V -V"
test_assert $CRM_EX_OK
desc="Query a resource instance attribute (shall survive)"
cmd="crm_resource -r mySmartFuse -g requires"
test_assert $CRM_EX_OK
unset CIB_shadow_dir
rm -f "$TMPXML"
}
# Exercise crm_rule: create a set of location constraints containing rules
# with various date_expression/date_spec configurations, then check each
# rule's state (expired, not yet in effect, in effect, unsupported).
#
# NOTE(review): every "cat < \"$TMPXML\"" / EOF pair below looks like a
# mangled here-document whose constraint XML body was stripped during
# extraction -- confirm against the upstream cts-cli.in.
test_rules() {
local TMPXML
create_shadow_cib
cibadmin -C -o resources --xml-text ''
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
# Compute tomorrow's date portably: BSD date uses -v, GNU date uses --date
if [ "$(uname)" == "FreeBSD" ]; then
tomorrow=$(date -v+1d +"%F %T %z")
else
tomorrow=$(date --date=tomorrow +"%F %T %z")
fi
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
# Now check each rule via crm_rule and assert the expected status code
desc="Try to check a rule that doesn't exist"
cmd="crm_rule -c -r blahblah"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule that has too many date_expressions"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
desc="Verify basic rule is expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
test_assert $CRM_EX_EXPIRED
desc="Verify basic rule worked in the past"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
test_assert $CRM_EX_OK
desc="Verify basic rule is not yet in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
test_assert $CRM_EX_NOT_YET_IN_EFFECT
desc="Verify date_spec rule with years has expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED
desc="Verify date_spec rule with years is in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
test_assert $CRM_EX_OK
desc="Try to check a rule whose date_spec does not contain years="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule whose date_spec contains years= and moon="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule with no date_expression"
cmd="crm_rule -c -r cli-no-date_expression-rule"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
unset CIB_shadow_dir
}
# Process command-line arguments
# -t <tests>     : space-separated list of test groups to run
# -V|--verbose   : show full diffs of unexpected output
# -v|--valgrind  : run commands under valgrind (G_SLICE=always-malloc makes
#                  glib allocations visible to valgrind)
# -s             : save this run's output as the new expected (.exp) files
# -p <dir>       : prepend a directory to PATH (to test alternate binaries)
while [ $# -gt 0 ]; do
case "$1" in
-t)
tests="$2"
shift 2
;;
-V|--verbose)
verbose=1
shift
;;
-v|--valgrind)
export G_SLICE=always-malloc
VALGRIND_CMD="valgrind $VALGRIND_OPTS"
shift
;;
-s)
do_save=1
shift
;;
-p)
# NOTE(review): no "shift 2" here -- the option's argument ($2) is
# consumed by the next loop iteration's case; presumably works only
# because "$2" then matches no option pattern. Verify upstream.
export PATH="$2:$PATH"
shift
;;
--help)
echo "$USAGE_TEXT"
exit $CRM_EX_OK
;;
*)
echo "error: unknown option $1"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
# Reject any unrecognized test name up front, before running anything.
# All known test groups share one no-op arm; anything else is an error.
for t in $tests; do
    case "$t" in
        dates|tools|acls|validity|upgrade|rules|crm_mon)
            ;;
        *)
            echo "error: unknown test $t"
            echo
            echo "$USAGE_TEXT"
            exit $CRM_EX_USAGE
            ;;
    esac
done
# Locate xmllint, used later to validate XML command output; leave
# XMLLINT_CMD empty (with a warning) if it is not installed.
# Use the POSIX "command -v" builtin instead of the external which(1),
# whose availability and exit status are not standardized.  The exit
# status of an assignment with a command substitution is the
# substitution's exit status, so the $? check below still works.
XMLLINT_CMD=$(command -v xmllint 2>/dev/null)
if [ $? -ne 0 ]; then
XMLLINT_CMD=""
echo "xmllint is missing - install it to validate command output"
fi
# Check whether we're running from source directory
# If so, prefer the just-built tools and (if present) the in-tree schemas;
# otherwise fall back to the installed schema directory.
SRCDIR=$(dirname $test_home)
if [ -x "$SRCDIR/tools/crm_simulate" ]; then
export PATH="$SRCDIR/tools:$PATH"
echo "Using local binaries from: $SRCDIR/tools"
if [ -x "$SRCDIR/xml" ]; then
export PCMK_schema_directory="$SRCDIR/xml"
echo "Using local schemas from: $PCMK_schema_directory"
fi
else
# @CRM_SCHEMA_DIRECTORY@ is an autoconf placeholder substituted at
# build time (this file is a .in template)
export PCMK_schema_directory=@CRM_SCHEMA_DIRECTORY@
fi
# Run each requested test group, capture its output, and normalize away
# run-specific details (timestamps, versions, file/line numbers, temp
# paths, legacy terminology) so the result can be diffed against the
# checked-in regression.<test>.exp files.
for t in $tests; do
echo "Testing $t"
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
# Record this test's output file in a TMPFILE_<test> variable so the
# comparison and summary loops below can find it
eval TMPFILE_$t="$TMPFILE"
test_$t > "$TMPFILE"
# last-rc-change= is always numeric in the CIB. However, for the crm_mon
# test we also need to compare against the XML output of the crm_mon
# program. There, these are shown as human readable strings (like the
# output of the `date` command).
# (The expression order below matters -- do not insert comments or
# reorder the -e clauses.)
sed -e 's/cib-last-written.*>/>/'\
 -e 's/Last updated: .*/Last updated:/' \
 -e 's/Last change: .*/Last change:/' \
 -e 's/(version .*)/(version)/' \
 -e 's/last_update time=\".*\"/last_update time=\"\"/' \
 -e 's/last_change time=\".*\"/last_change time=\"\"/' \
 -e 's/ api-version=\".*\" / api-version=\"X\" /' \
 -e 's/ version=\".*\" / version=\"\" /' \
 -e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \
 -e 's/crm_feature_set="[^"]*" //'\
 -e 's/validate-with="[^"]*" //'\
 -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
 -e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
 -e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
 -e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
 -e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
 -e 's/ last-rc-change=\"[A-Za-z0-9: ]*\"//'\
 -e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
 -e 's/^Entity: line [0-9][0-9]*: //'\
 -e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
 -e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
 -e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
 -e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
 -e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
 -e 's/^lt-//' \
 -e 's/ocf::/ocf:/' \
 -e 's/Masters:/Promoted:/' \
 -e 's/Slaves:/Unpromoted:/' \
 -e 's/Master/Promoted/' \
 -e 's/Slave/Unpromoted/' \
 "$TMPFILE" > "${TMPFILE}.$$"
# Replace the raw output with its filtered version
mv -- "${TMPFILE}.$$" "$TMPFILE"
# With -s, adopt this run's output as the new expected output
if [ $do_save -eq 1 ]; then
cp "$TMPFILE" $test_home/cli/regression.$t.exp
fi
done
# All tests done; remove the shadow CIB directory
rm -rf "${shadow_dir}"
# Compare each test's filtered output against its expected file.
# failed=1 if any diff is non-empty.
failed=0
if [ $verbose -eq 1 ]; then
echo -e "\n\nResults"
fi
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
if [ $verbose -eq 1 ]; then
diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
else
diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
fi
# $? after "fi" is the exit status of the last command run inside the
# if construct, i.e. whichever diff was executed
if [ $? -ne 0 ]; then
failed=1
fi
done
# Print the per-assertion pass/fail lines collected in each test's output
echo -e "\n\nSummary"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
grep -e '^\* \(Passed\|Failed\)' "$TMPFILE"
done
# Exit status policy:
#  - any assertion failed            -> CRM_EX_ERROR, keep output files
#  - assertions passed, diffs differ -> CRM_EX_DIGEST, keep output files
#  - everything matched              -> CRM_EX_OK, clean up
if [ $num_errors -ne 0 ]; then
echo "$num_errors tests failed; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo "    $TMPFILE"
done
exit $CRM_EX_ERROR
elif [ $failed -eq 1 ]; then
echo "$num_passed tests passed but output was unexpected; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo "    $TMPFILE"
done
exit $CRM_EX_DIGEST
else
echo $num_passed tests passed
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
rm -f "$TMPFILE"
done
# Best-effort removal of the shadow CIB; errors are ignored
crm_shadow --force --delete $shadow >/dev/null 2>&1
exit $CRM_EX_OK
fi
diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary
index f0e4b9b0d1..00457a9c39 100644
--- a/cts/scheduler/summary/bug-1822.summary
+++ b/cts/scheduler/summary/bug-1822.summary
@@ -1,44 +1,44 @@
Current cluster status:
* Node List:
* Online: [ process1a process2b ]
* Full List of Resources:
- * Clone Set: ms-sf [ms-sf_group] (promotable) (unique):
+ * Clone Set: ms-sf [ms-sf_group] (promotable, unique):
* Resource Group: ms-sf_group:0:
* master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b
* master_slave_procdctl:0 (ocf:heartbeat:procdctl): Stopped
* Resource Group: ms-sf_group:1:
* master_slave_Stateful:1 (ocf:heartbeat:Dummy-statful): Promoted process1a
* master_slave_procdctl:1 (ocf:heartbeat:procdctl): Promoted process1a
Transition Summary:
* Stop master_slave_Stateful:1 ( Promoted process1a ) due to node availability
* Stop master_slave_procdctl:1 ( Promoted process1a ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms-sf_demote_0
* Pseudo action: ms-sf_group:1_demote_0
* Resource action: master_slave_Stateful:1 demote on process1a
* Resource action: master_slave_procdctl:1 demote on process1a
* Pseudo action: ms-sf_group:1_demoted_0
* Pseudo action: ms-sf_demoted_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: ms-sf_group:1_stop_0
* Resource action: master_slave_Stateful:1 stop on process1a
* Resource action: master_slave_procdctl:1 stop on process1a
* Cluster action: do_shutdown on process1a
* Pseudo action: ms-sf_group:1_stopped_0
* Pseudo action: ms-sf_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ process1a process2b ]
* Full List of Resources:
- * Clone Set: ms-sf [ms-sf_group] (promotable) (unique):
+ * Clone Set: ms-sf [ms-sf_group] (promotable, unique):
* Resource Group: ms-sf_group:0:
* master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b
* master_slave_procdctl:0 (ocf:heartbeat:procdctl): Stopped
* Resource Group: ms-sf_group:1:
* master_slave_Stateful:1 (ocf:heartbeat:Dummy-statful): Stopped
* master_slave_procdctl:1 (ocf:heartbeat:procdctl): Stopped
diff --git a/cts/scheduler/summary/bug-5140-require-all-false.summary b/cts/scheduler/summary/bug-5140-require-all-false.summary
index 9623dd82da..a56fe6d6cc 100644
--- a/cts/scheduler/summary/bug-5140-require-all-false.summary
+++ b/cts/scheduler/summary/bug-5140-require-all-false.summary
@@ -1,83 +1,83 @@
4 of 35 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Node hex-1: standby
* Node hex-2: standby (with active resources)
* Node hex-3: OFFLINE (standby)
* Full List of Resources:
* fencing (stonith:external/sbd): Stopped
* Clone Set: baseclone [basegrp]:
* Resource Group: basegrp:0:
* dlm (ocf:pacemaker:controld): Started hex-2
* clvmd (ocf:lvm2:clvmd): Started hex-2
* o2cb (ocf:ocfs2:o2cb): Started hex-2
* vg1 (ocf:heartbeat:LVM): Stopped
* fs-ocfs-1 (ocf:heartbeat:Filesystem): Stopped
* Stopped: [ hex-1 hex-3 ]
* fs-xfs-1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: fs2 [fs-ocfs-2]:
* Stopped: [ hex-1 hex-2 hex-3 ]
- * Clone Set: ms-r0 [drbd-r0] (promotable) (disabled):
+ * Clone Set: ms-r0 [drbd-r0] (promotable, disabled):
* Stopped (disabled): [ hex-1 hex-2 hex-3 ]
- * Clone Set: ms-r1 [drbd-r1] (promotable) (disabled):
+ * Clone Set: ms-r1 [drbd-r1] (promotable, disabled):
* Stopped (disabled): [ hex-1 hex-2 hex-3 ]
* Resource Group: md0-group:
* md0 (ocf:heartbeat:Raid1): Stopped
* vg-md0 (ocf:heartbeat:LVM): Stopped
* fs-md0 (ocf:heartbeat:Filesystem): Stopped
* dummy1 (ocf:heartbeat:Delay): Stopped
* dummy3 (ocf:heartbeat:Delay): Stopped
* dummy4 (ocf:heartbeat:Delay): Stopped
* dummy5 (ocf:heartbeat:Delay): Stopped
* dummy6 (ocf:heartbeat:Delay): Stopped
* Resource Group: r0-group:
* fs-r0 (ocf:heartbeat:Filesystem): Stopped
* dummy2 (ocf:heartbeat:Delay): Stopped
* cluster-md0 (ocf:heartbeat:Raid1): Stopped
Transition Summary:
* Stop dlm:0 ( hex-2 ) due to node availability
* Stop clvmd:0 ( hex-2 ) due to node availability
* Stop o2cb:0 ( hex-2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: baseclone_stop_0
* Pseudo action: basegrp:0_stop_0
* Resource action: o2cb stop on hex-2
* Resource action: clvmd stop on hex-2
* Resource action: dlm stop on hex-2
* Pseudo action: basegrp:0_stopped_0
* Pseudo action: baseclone_stopped_0
Revised Cluster Status:
* Node List:
* Node hex-1: standby
* Node hex-2: standby
* Node hex-3: OFFLINE (standby)
* Full List of Resources:
* fencing (stonith:external/sbd): Stopped
* Clone Set: baseclone [basegrp]:
* Stopped: [ hex-1 hex-2 hex-3 ]
* fs-xfs-1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: fs2 [fs-ocfs-2]:
* Stopped: [ hex-1 hex-2 hex-3 ]
- * Clone Set: ms-r0 [drbd-r0] (promotable) (disabled):
+ * Clone Set: ms-r0 [drbd-r0] (promotable, disabled):
* Stopped (disabled): [ hex-1 hex-2 hex-3 ]
- * Clone Set: ms-r1 [drbd-r1] (promotable) (disabled):
+ * Clone Set: ms-r1 [drbd-r1] (promotable, disabled):
* Stopped (disabled): [ hex-1 hex-2 hex-3 ]
* Resource Group: md0-group:
* md0 (ocf:heartbeat:Raid1): Stopped
* vg-md0 (ocf:heartbeat:LVM): Stopped
* fs-md0 (ocf:heartbeat:Filesystem): Stopped
* dummy1 (ocf:heartbeat:Delay): Stopped
* dummy3 (ocf:heartbeat:Delay): Stopped
* dummy4 (ocf:heartbeat:Delay): Stopped
* dummy5 (ocf:heartbeat:Delay): Stopped
* dummy6 (ocf:heartbeat:Delay): Stopped
* Resource Group: r0-group:
* fs-r0 (ocf:heartbeat:Filesystem): Stopped
* dummy2 (ocf:heartbeat:Delay): Stopped
* cluster-md0 (ocf:heartbeat:Raid1): Stopped
diff --git a/cts/scheduler/summary/bug-lf-2358.summary b/cts/scheduler/summary/bug-lf-2358.summary
index 7c2c3d220b..b89aadc317 100644
--- a/cts/scheduler/summary/bug-lf-2358.summary
+++ b/cts/scheduler/summary/bug-lf-2358.summary
@@ -1,68 +1,68 @@
2 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ alice.demo bob.demo ]
* Full List of Resources:
- * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled):
+ * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable, disabled):
* Stopped (disabled): [ alice.demo bob.demo ]
* Resource Group: rg_nfs:
* res_fs_nfsexport (ocf:heartbeat:Filesystem): Stopped
* res_ip_nfs (ocf:heartbeat:IPaddr2): Stopped
* res_nfs (lsb:nfs): Stopped
* Resource Group: rg_mysql1:
* res_fs_mysql1 (ocf:heartbeat:Filesystem): Started bob.demo
* res_ip_mysql1 (ocf:heartbeat:IPaddr2): Started bob.demo
* res_mysql1 (ocf:heartbeat:mysql): Started bob.demo
* Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable):
* Promoted: [ bob.demo ]
* Stopped: [ alice.demo ]
* Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable):
* Promoted: [ alice.demo ]
* Unpromoted: [ bob.demo ]
* Resource Group: rg_mysql2:
* res_fs_mysql2 (ocf:heartbeat:Filesystem): Started alice.demo
* res_ip_mysql2 (ocf:heartbeat:IPaddr2): Started alice.demo
* res_mysql2 (ocf:heartbeat:mysql): Started alice.demo
Transition Summary:
* Start res_drbd_mysql1:1 ( alice.demo )
Executing Cluster Transition:
* Pseudo action: ms_drbd_mysql1_pre_notify_start_0
* Resource action: res_drbd_mysql1:0 notify on bob.demo
* Pseudo action: ms_drbd_mysql1_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_mysql1_start_0
* Resource action: res_drbd_mysql1:1 start on alice.demo
* Pseudo action: ms_drbd_mysql1_running_0
* Pseudo action: ms_drbd_mysql1_post_notify_running_0
* Resource action: res_drbd_mysql1:0 notify on bob.demo
* Resource action: res_drbd_mysql1:1 notify on alice.demo
* Pseudo action: ms_drbd_mysql1_confirmed-post_notify_running_0
Revised Cluster Status:
* Node List:
* Online: [ alice.demo bob.demo ]
* Full List of Resources:
- * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled):
+ * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable, disabled):
* Stopped (disabled): [ alice.demo bob.demo ]
* Resource Group: rg_nfs:
* res_fs_nfsexport (ocf:heartbeat:Filesystem): Stopped
* res_ip_nfs (ocf:heartbeat:IPaddr2): Stopped
* res_nfs (lsb:nfs): Stopped
* Resource Group: rg_mysql1:
* res_fs_mysql1 (ocf:heartbeat:Filesystem): Started bob.demo
* res_ip_mysql1 (ocf:heartbeat:IPaddr2): Started bob.demo
* res_mysql1 (ocf:heartbeat:mysql): Started bob.demo
* Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable):
* Promoted: [ bob.demo ]
* Unpromoted: [ alice.demo ]
* Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable):
* Promoted: [ alice.demo ]
* Unpromoted: [ bob.demo ]
* Resource Group: rg_mysql2:
* res_fs_mysql2 (ocf:heartbeat:Filesystem): Started alice.demo
* res_ip_mysql2 (ocf:heartbeat:IPaddr2): Started alice.demo
* res_mysql2 (ocf:heartbeat:mysql): Started alice.demo
diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary
index f638b3fc4f..7a9fc5c1b0 100644
--- a/cts/scheduler/summary/bug-pm-11.summary
+++ b/cts/scheduler/summary/bug-pm-11.summary
@@ -1,48 +1,48 @@
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Stopped
Transition Summary:
* Start stateful-2:0 ( node-b )
* Promote stateful-2:1 ( Stopped -> Promoted node-a )
Executing Cluster Transition:
* Resource action: stateful-2:0 monitor on node-b
* Resource action: stateful-2:0 monitor on node-a
* Resource action: stateful-2:1 monitor on node-b
* Resource action: stateful-2:1 monitor on node-a
* Pseudo action: ms-sf_start_0
* Pseudo action: group:0_start_0
* Resource action: stateful-2:0 start on node-b
* Pseudo action: group:1_start_0
* Resource action: stateful-2:1 start on node-a
* Pseudo action: group:0_running_0
* Pseudo action: group:1_running_0
* Pseudo action: ms-sf_running_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-2:1 promote on node-a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary
index c4f3adb908..2b473e8b91 100644
--- a/cts/scheduler/summary/bug-pm-12.summary
+++ b/cts/scheduler/summary/bug-pm-12.summary
@@ -1,57 +1,57 @@
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
Transition Summary:
* Restart stateful-2:0 ( Unpromoted node-b ) due to resource definition change
* Restart stateful-2:1 ( Promoted node-a ) due to resource definition change
Executing Cluster Transition:
* Pseudo action: ms-sf_demote_0
* Pseudo action: group:1_demote_0
* Resource action: stateful-2:1 demote on node-a
* Pseudo action: group:1_demoted_0
* Pseudo action: ms-sf_demoted_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-2:0 stop on node-b
* Pseudo action: group:1_stop_0
* Resource action: stateful-2:1 stop on node-a
* Pseudo action: group:0_stopped_0
* Pseudo action: group:1_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_start_0
* Pseudo action: group:0_start_0
* Resource action: stateful-2:0 start on node-b
* Pseudo action: group:1_start_0
* Resource action: stateful-2:1 start on node-a
* Pseudo action: group:0_running_0
* Pseudo action: group:1_running_0
* Pseudo action: ms-sf_running_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-2:1 promote on node-a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
diff --git a/cts/scheduler/summary/clone-max-zero.summary b/cts/scheduler/summary/clone-max-zero.summary
index b5f4ec7086..f0612d7100 100644
--- a/cts/scheduler/summary/clone-max-zero.summary
+++ b/cts/scheduler/summary/clone-max-zero.summary
@@ -1,51 +1,50 @@
Current cluster status:
* Node List:
* Online: [ c001n11 c001n12 ]
* Full List of Resources:
* fencing (stonith:external/ssh): Started c001n11
* Clone Set: dlm-clone [dlm]:
* dlm (ocf:pacemaker:controld): ORPHANED Started c001n12
* dlm (ocf:pacemaker:controld): ORPHANED Started c001n11
* Clone Set: o2cb-clone [o2cb]:
* Started: [ c001n11 c001n12 ]
* Clone Set: clone-drbd0 [drbd0]:
* Started: [ c001n11 c001n12 ]
* Clone Set: c-ocfs2-1 [ocfs2-1]:
* Started: [ c001n11 c001n12 ]
Transition Summary:
* Stop dlm:0 ( c001n12 ) due to node availability
* Stop dlm:1 ( c001n11 ) due to node availability
* Stop o2cb:0 ( c001n12 ) due to node availability
* Stop o2cb:1 ( c001n11 ) due to node availability
* Stop ocfs2-1:0 ( c001n12 ) due to node availability
* Stop ocfs2-1:1 ( c001n11 ) due to node availability
Executing Cluster Transition:
* Pseudo action: c-ocfs2-1_stop_0
* Resource action: ocfs2-1:1 stop on c001n12
* Resource action: ocfs2-1:0 stop on c001n11
* Pseudo action: c-ocfs2-1_stopped_0
* Pseudo action: o2cb-clone_stop_0
* Resource action: o2cb:1 stop on c001n12
* Resource action: o2cb:0 stop on c001n11
* Pseudo action: o2cb-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm:1 stop on c001n12
* Resource action: dlm:0 stop on c001n11
* Pseudo action: dlm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ c001n11 c001n12 ]
* Full List of Resources:
* fencing (stonith:external/ssh): Started c001n11
- * Clone Set: dlm-clone [dlm]:
* Clone Set: o2cb-clone [o2cb]:
* Stopped: [ c001n11 c001n12 ]
* Clone Set: clone-drbd0 [drbd0]:
* Started: [ c001n11 c001n12 ]
* Clone Set: c-ocfs2-1 [ocfs2-1]:
* Stopped: [ c001n11 c001n12 ]
diff --git a/cts/scheduler/summary/group14.summary b/cts/scheduler/summary/group14.summary
index a1ba66a2e5..80ded38d78 100644
--- a/cts/scheduler/summary/group14.summary
+++ b/cts/scheduler/summary/group14.summary
@@ -1,102 +1,102 @@
Current cluster status:
* Node List:
* Online: [ c001n06 c001n07 ]
* OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n06
* r192.168.100.182 (ocf:heartbeat:IPaddr): Stopped
* r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
* migrator (ocf:heartbeat:Dummy): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n04 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n05 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing]:
* Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Stopped
Transition Summary:
* Start DcIPaddr ( c001n06 ) due to no quorum (blocked)
* Stop r192.168.100.181 ( c001n06 ) due to no quorum
* Start r192.168.100.182 ( c001n07 ) due to no quorum (blocked)
* Start r192.168.100.183 ( c001n07 ) due to no quorum (blocked)
* Start lsb_dummy ( c001n06 ) due to no quorum (blocked)
* Start migrator ( c001n06 ) due to no quorum (blocked)
* Start rsc_c001n03 ( c001n06 ) due to no quorum (blocked)
* Start rsc_c001n02 ( c001n07 ) due to no quorum (blocked)
* Start rsc_c001n04 ( c001n06 ) due to no quorum (blocked)
* Start rsc_c001n05 ( c001n07 ) due to no quorum (blocked)
* Start rsc_c001n06 ( c001n06 ) due to no quorum (blocked)
* Start rsc_c001n07 ( c001n07 ) due to no quorum (blocked)
* Start child_DoFencing:0 ( c001n06 )
* Start child_DoFencing:1 ( c001n07 )
* Start ocf_msdummy:0 ( c001n06 ) due to no quorum (blocked)
* Start ocf_msdummy:1 ( c001n07 ) due to no quorum (blocked)
* Start ocf_msdummy:2 ( c001n06 ) due to no quorum (blocked)
* Start ocf_msdummy:3 ( c001n07 ) due to no quorum (blocked)
Executing Cluster Transition:
* Pseudo action: group-1_stop_0
* Resource action: r192.168.100.181 stop on c001n06
* Pseudo action: DoFencing_start_0
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: child_DoFencing:0 start on c001n06
* Resource action: child_DoFencing:1 start on c001n07
* Pseudo action: DoFencing_running_0
* Resource action: child_DoFencing:0 monitor=20000 on c001n06
* Resource action: child_DoFencing:1 monitor=20000 on c001n07
Revised Cluster Status:
* Node List:
* Online: [ c001n06 c001n07 ]
* OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Stopped
* r192.168.100.182 (ocf:heartbeat:IPaddr): Stopped
* r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
* migrator (ocf:heartbeat:Dummy): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n04 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n05 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n06 c001n07 ]
* Stopped: [ c001n02 c001n03 c001n04 c001n05 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Stopped
diff --git a/cts/scheduler/summary/inc11.summary b/cts/scheduler/summary/inc11.summary
index 51e838c374..256a10e8f7 100644
--- a/cts/scheduler/summary/inc11.summary
+++ b/cts/scheduler/summary/inc11.summary
@@ -1,43 +1,43 @@
Current cluster status:
* Node List:
* Online: [ node0 node1 node2 ]
* Full List of Resources:
* simple-rsc (ocf:heartbeat:apache): Stopped
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Start simple-rsc ( node2 )
* Start child_rsc1:0 ( node1 )
* Promote child_rsc1:1 ( Stopped -> Promoted node2 )
Executing Cluster Transition:
* Resource action: simple-rsc monitor on node2
* Resource action: simple-rsc monitor on node1
* Resource action: simple-rsc monitor on node0
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:0 monitor on node0
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:1 monitor on node0
* Pseudo action: rsc1_start_0
* Resource action: simple-rsc start on node2
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:1 promote on node2
* Pseudo action: rsc1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node0 node1 node2 ]
* Full List of Resources:
* simple-rsc (ocf:heartbeat:apache): Started node2
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
diff --git a/cts/scheduler/summary/inc12.summary b/cts/scheduler/summary/inc12.summary
index 1ada08dda0..2c93e2678c 100644
--- a/cts/scheduler/summary/inc12.summary
+++ b/cts/scheduler/summary/inc12.summary
@@ -1,132 +1,132 @@
Current cluster status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ]
* Stopped: [ c001n03 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted c001n04
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted c001n04
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted c001n05
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted c001n05
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Unpromoted c001n06
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Unpromoted c001n06
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted c001n07
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted c001n07
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted c001n02
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted c001n02
Transition Summary:
* Stop ocf_192.168.100.181 ( c001n02 ) due to node availability
* Stop heartbeat_192.168.100.182 ( c001n02 ) due to node availability
* Stop ocf_192.168.100.183 ( c001n02 ) due to node availability
* Stop lsb_dummy ( c001n04 ) due to node availability
* Stop rsc_c001n03 ( c001n05 ) due to node availability
* Stop rsc_c001n02 ( c001n02 ) due to node availability
* Stop rsc_c001n04 ( c001n04 ) due to node availability
* Stop rsc_c001n05 ( c001n05 ) due to node availability
* Stop rsc_c001n06 ( c001n06 ) due to node availability
* Stop rsc_c001n07 ( c001n07 ) due to node availability
* Stop child_DoFencing:0 ( c001n02 ) due to node availability
* Stop child_DoFencing:1 ( c001n04 ) due to node availability
* Stop child_DoFencing:2 ( c001n05 ) due to node availability
* Stop child_DoFencing:3 ( c001n06 ) due to node availability
* Stop child_DoFencing:4 ( c001n07 ) due to node availability
* Stop ocf_msdummy:2 ( Unpromoted c001n04 ) due to node availability
* Stop ocf_msdummy:3 ( Unpromoted c001n04 ) due to node availability
* Stop ocf_msdummy:4 ( Unpromoted c001n05 ) due to node availability
* Stop ocf_msdummy:5 ( Unpromoted c001n05 ) due to node availability
* Stop ocf_msdummy:6 ( Unpromoted c001n06 ) due to node availability
* Stop ocf_msdummy:7 ( Unpromoted c001n06 ) due to node availability
* Stop ocf_msdummy:8 ( Unpromoted c001n07 ) due to node availability
* Stop ocf_msdummy:9 ( Unpromoted c001n07 ) due to node availability
* Stop ocf_msdummy:10 ( Unpromoted c001n02 ) due to node availability
* Stop ocf_msdummy:11 ( Unpromoted c001n02 ) due to node availability
Executing Cluster Transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n02
* Resource action: lsb_dummy stop on c001n04
* Resource action: rsc_c001n03 stop on c001n05
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: rsc_c001n04 stop on c001n04
* Resource action: rsc_c001n05 stop on c001n05
* Resource action: rsc_c001n06 stop on c001n06
* Resource action: rsc_c001n07 stop on c001n07
* Pseudo action: DoFencing_stop_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n02
* Resource action: child_DoFencing:1 stop on c001n02
* Resource action: child_DoFencing:2 stop on c001n04
* Resource action: child_DoFencing:3 stop on c001n05
* Resource action: child_DoFencing:4 stop on c001n06
* Resource action: child_DoFencing:5 stop on c001n07
* Pseudo action: DoFencing_stopped_0
* Resource action: ocf_msdummy:2 stop on c001n04
* Resource action: ocf_msdummy:3 stop on c001n04
* Resource action: ocf_msdummy:4 stop on c001n05
* Resource action: ocf_msdummy:5 stop on c001n05
* Resource action: ocf_msdummy:6 stop on c001n06
* Resource action: ocf_msdummy:7 stop on c001n06
* Resource action: ocf_msdummy:8 stop on c001n07
* Resource action: ocf_msdummy:9 stop on c001n07
* Resource action: ocf_msdummy:10 stop on c001n02
* Resource action: ocf_msdummy:11 stop on c001n02
* Pseudo action: master_rsc_1_stopped_0
* Cluster action: do_shutdown on c001n07
* Cluster action: do_shutdown on c001n06
* Cluster action: do_shutdown on c001n05
* Cluster action: do_shutdown on c001n04
* Resource action: ocf_192.168.100.181 stop on c001n02
* Cluster action: do_shutdown on c001n02
* Pseudo action: group-1_stopped_0
* Cluster action: do_shutdown on c001n03
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Stopped
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Stopped
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n04 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n05 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing]:
* Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Stopped
diff --git a/cts/scheduler/summary/managed-1.summary b/cts/scheduler/summary/managed-1.summary
index 1c417a4fb5..9c25080237 100644
--- a/cts/scheduler/summary/managed-1.summary
+++ b/cts/scheduler/summary/managed-1.summary
@@ -1,132 +1,132 @@
Current cluster status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n09 (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
- * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+ * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
* child_DoFencing:0 (stonith:ssh): Started c001n02 (unmanaged)
* child_DoFencing:1 (stonith:ssh): Started c001n03 (unmanaged)
* child_DoFencing:2 (stonith:ssh): Started c001n04 (unmanaged)
* child_DoFencing:3 (stonith:ssh): Started c001n05 (unmanaged)
* child_DoFencing:4 (stonith:ssh): Started c001n06 (unmanaged)
* child_DoFencing:5 (stonith:ssh): Started c001n07 (unmanaged)
* child_DoFencing:6 (stonith:ssh): Started c001n08 (unmanaged)
* child_DoFencing:7 (stonith:ssh): Started c001n09 (unmanaged)
Transition Summary:
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n07
* Resource action: DcIPaddr monitor on c001n06
* Resource action: DcIPaddr monitor on c001n05
* Resource action: DcIPaddr monitor on c001n04
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n02
* Resource action: rsc_c001n09 monitor on c001n08
* Resource action: rsc_c001n09 monitor on c001n07
* Resource action: rsc_c001n09 monitor on c001n05
* Resource action: rsc_c001n09 monitor on c001n04
* Resource action: rsc_c001n09 monitor on c001n03
* Resource action: rsc_c001n09 monitor on c001n02
* Resource action: rsc_c001n02 monitor on c001n09
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n07
* Resource action: rsc_c001n02 monitor on c001n05
* Resource action: rsc_c001n02 monitor on c001n04
* Resource action: rsc_c001n03 monitor on c001n09
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n07
* Resource action: rsc_c001n03 monitor on c001n05
* Resource action: rsc_c001n03 monitor on c001n04
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n04 monitor on c001n09
* Resource action: rsc_c001n04 monitor on c001n08
* Resource action: rsc_c001n04 monitor on c001n07
* Resource action: rsc_c001n04 monitor on c001n05
* Resource action: rsc_c001n04 monitor on c001n03
* Resource action: rsc_c001n04 monitor on c001n02
* Resource action: rsc_c001n05 monitor on c001n09
* Resource action: rsc_c001n05 monitor on c001n08
* Resource action: rsc_c001n05 monitor on c001n07
* Resource action: rsc_c001n05 monitor on c001n06
* Resource action: rsc_c001n05 monitor on c001n04
* Resource action: rsc_c001n05 monitor on c001n03
* Resource action: rsc_c001n05 monitor on c001n02
* Resource action: rsc_c001n06 monitor on c001n09
* Resource action: rsc_c001n06 monitor on c001n08
* Resource action: rsc_c001n06 monitor on c001n07
* Resource action: rsc_c001n06 monitor on c001n05
* Resource action: rsc_c001n06 monitor on c001n04
* Resource action: rsc_c001n06 monitor on c001n03
* Resource action: rsc_c001n07 monitor on c001n09
* Resource action: rsc_c001n07 monitor on c001n08
* Resource action: rsc_c001n07 monitor on c001n06
* Resource action: rsc_c001n07 monitor on c001n05
* Resource action: rsc_c001n07 monitor on c001n04
* Resource action: rsc_c001n08 monitor on c001n09
* Resource action: rsc_c001n08 monitor on c001n07
* Resource action: rsc_c001n08 monitor on c001n05
* Resource action: child_DoFencing:0 monitor on c001n09
* Resource action: child_DoFencing:0 monitor on c001n08
* Resource action: child_DoFencing:0 monitor on c001n07
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n07
* Resource action: child_DoFencing:1 monitor on c001n02
* Resource action: child_DoFencing:2 monitor on c001n09
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n07
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n04
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: child_DoFencing:4 monitor on c001n09
* Resource action: child_DoFencing:4 monitor on c001n05
* Resource action: child_DoFencing:4 monitor on c001n03
* Resource action: child_DoFencing:5 monitor on c001n08
* Resource action: child_DoFencing:5 monitor on c001n05
* Resource action: child_DoFencing:5 monitor on c001n04
* Resource action: child_DoFencing:5 monitor on c001n02
* Resource action: child_DoFencing:6 monitor on c001n09
* Resource action: child_DoFencing:6 monitor on c001n07
* Resource action: child_DoFencing:6 monitor on c001n05
* Resource action: child_DoFencing:6 monitor on c001n04
* Resource action: child_DoFencing:7 monitor on c001n08
* Resource action: child_DoFencing:7 monitor on c001n07
* Resource action: child_DoFencing:7 monitor on c001n05
* Resource action: child_DoFencing:7 monitor on c001n04
* Resource action: child_DoFencing:7 monitor on c001n03
* Resource action: child_DoFencing:7 monitor on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n09 (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
- * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+ * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
* child_DoFencing:0 (stonith:ssh): Started c001n02 (unmanaged)
* child_DoFencing:1 (stonith:ssh): Started c001n03 (unmanaged)
* child_DoFencing:2 (stonith:ssh): Started c001n04 (unmanaged)
* child_DoFencing:3 (stonith:ssh): Started c001n05 (unmanaged)
* child_DoFencing:4 (stonith:ssh): Started c001n06 (unmanaged)
* child_DoFencing:5 (stonith:ssh): Started c001n07 (unmanaged)
* child_DoFencing:6 (stonith:ssh): Started c001n08 (unmanaged)
* child_DoFencing:7 (stonith:ssh): Started c001n09 (unmanaged)
diff --git a/cts/scheduler/summary/managed-2.summary b/cts/scheduler/summary/managed-2.summary
index a1d327c3da..dd0a1870b8 100644
--- a/cts/scheduler/summary/managed-2.summary
+++ b/cts/scheduler/summary/managed-2.summary
@@ -1,166 +1,166 @@
Current cluster status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n09 (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
- * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+ * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
* child_DoFencing:0 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:1 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:2 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:3 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:4 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:5 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:6 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:7 (stonith:ssh): Stopped (unmanaged)
Transition Summary:
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n07
* Resource action: DcIPaddr monitor on c001n06
* Resource action: DcIPaddr monitor on c001n05
* Resource action: DcIPaddr monitor on c001n04
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n02
* Resource action: rsc_c001n09 monitor on c001n08
* Resource action: rsc_c001n09 monitor on c001n07
* Resource action: rsc_c001n09 monitor on c001n05
* Resource action: rsc_c001n09 monitor on c001n04
* Resource action: rsc_c001n09 monitor on c001n03
* Resource action: rsc_c001n09 monitor on c001n02
* Resource action: rsc_c001n02 monitor on c001n09
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n07
* Resource action: rsc_c001n02 monitor on c001n05
* Resource action: rsc_c001n02 monitor on c001n04
* Resource action: rsc_c001n03 monitor on c001n09
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n07
* Resource action: rsc_c001n03 monitor on c001n05
* Resource action: rsc_c001n03 monitor on c001n04
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n04 monitor on c001n09
* Resource action: rsc_c001n04 monitor on c001n08
* Resource action: rsc_c001n04 monitor on c001n07
* Resource action: rsc_c001n04 monitor on c001n05
* Resource action: rsc_c001n04 monitor on c001n03
* Resource action: rsc_c001n04 monitor on c001n02
* Resource action: rsc_c001n05 monitor on c001n09
* Resource action: rsc_c001n05 monitor on c001n08
* Resource action: rsc_c001n05 monitor on c001n07
* Resource action: rsc_c001n05 monitor on c001n06
* Resource action: rsc_c001n05 monitor on c001n04
* Resource action: rsc_c001n05 monitor on c001n03
* Resource action: rsc_c001n05 monitor on c001n02
* Resource action: rsc_c001n06 monitor on c001n09
* Resource action: rsc_c001n06 monitor on c001n08
* Resource action: rsc_c001n06 monitor on c001n07
* Resource action: rsc_c001n06 monitor on c001n05
* Resource action: rsc_c001n06 monitor on c001n04
* Resource action: rsc_c001n06 monitor on c001n03
* Resource action: rsc_c001n07 monitor on c001n09
* Resource action: rsc_c001n07 monitor on c001n08
* Resource action: rsc_c001n07 monitor on c001n06
* Resource action: rsc_c001n07 monitor on c001n05
* Resource action: rsc_c001n07 monitor on c001n04
* Resource action: rsc_c001n08 monitor on c001n09
* Resource action: rsc_c001n08 monitor on c001n07
* Resource action: rsc_c001n08 monitor on c001n05
* Resource action: child_DoFencing:0 monitor on c001n09
* Resource action: child_DoFencing:0 monitor on c001n08
* Resource action: child_DoFencing:0 monitor on c001n07
* Resource action: child_DoFencing:0 monitor on c001n06
* Resource action: child_DoFencing:0 monitor on c001n05
* Resource action: child_DoFencing:0 monitor on c001n04
* Resource action: child_DoFencing:0 monitor on c001n03
* Resource action: child_DoFencing:0 monitor on c001n02
* Resource action: child_DoFencing:1 monitor on c001n09
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n07
* Resource action: child_DoFencing:1 monitor on c001n06
* Resource action: child_DoFencing:1 monitor on c001n05
* Resource action: child_DoFencing:1 monitor on c001n04
* Resource action: child_DoFencing:1 monitor on c001n03
* Resource action: child_DoFencing:1 monitor on c001n02
* Resource action: child_DoFencing:2 monitor on c001n09
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n07
* Resource action: child_DoFencing:2 monitor on c001n06
* Resource action: child_DoFencing:2 monitor on c001n05
* Resource action: child_DoFencing:2 monitor on c001n04
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n09
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n07
* Resource action: child_DoFencing:3 monitor on c001n06
* Resource action: child_DoFencing:3 monitor on c001n05
* Resource action: child_DoFencing:3 monitor on c001n04
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: child_DoFencing:4 monitor on c001n09
* Resource action: child_DoFencing:4 monitor on c001n08
* Resource action: child_DoFencing:4 monitor on c001n07
* Resource action: child_DoFencing:4 monitor on c001n06
* Resource action: child_DoFencing:4 monitor on c001n05
* Resource action: child_DoFencing:4 monitor on c001n04
* Resource action: child_DoFencing:4 monitor on c001n03
* Resource action: child_DoFencing:4 monitor on c001n02
* Resource action: child_DoFencing:5 monitor on c001n09
* Resource action: child_DoFencing:5 monitor on c001n08
* Resource action: child_DoFencing:5 monitor on c001n07
* Resource action: child_DoFencing:5 monitor on c001n06
* Resource action: child_DoFencing:5 monitor on c001n05
* Resource action: child_DoFencing:5 monitor on c001n04
* Resource action: child_DoFencing:5 monitor on c001n03
* Resource action: child_DoFencing:5 monitor on c001n02
* Resource action: child_DoFencing:6 monitor on c001n09
* Resource action: child_DoFencing:6 monitor on c001n08
* Resource action: child_DoFencing:6 monitor on c001n07
* Resource action: child_DoFencing:6 monitor on c001n06
* Resource action: child_DoFencing:6 monitor on c001n05
* Resource action: child_DoFencing:6 monitor on c001n04
* Resource action: child_DoFencing:6 monitor on c001n03
* Resource action: child_DoFencing:6 monitor on c001n02
* Resource action: child_DoFencing:7 monitor on c001n09
* Resource action: child_DoFencing:7 monitor on c001n08
* Resource action: child_DoFencing:7 monitor on c001n07
* Resource action: child_DoFencing:7 monitor on c001n06
* Resource action: child_DoFencing:7 monitor on c001n05
* Resource action: child_DoFencing:7 monitor on c001n04
* Resource action: child_DoFencing:7 monitor on c001n03
* Resource action: child_DoFencing:7 monitor on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 c001n08 c001n09 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n09 (ocf:heartbeat:IPaddr): Started c001n09
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
- * Clone Set: DoFencing [child_DoFencing] (unique) (unmanaged):
+ * Clone Set: DoFencing [child_DoFencing] (unique, unmanaged):
* child_DoFencing:0 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:1 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:2 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:3 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:4 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:5 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:6 (stonith:ssh): Stopped (unmanaged)
* child_DoFencing:7 (stonith:ssh): Stopped (unmanaged)
diff --git a/cts/scheduler/summary/promoted-0.summary b/cts/scheduler/summary/promoted-0.summary
index b80bb106cf..3e724ffdc4 100644
--- a/cts/scheduler/summary/promoted-0.summary
+++ b/cts/scheduler/summary/promoted-0.summary
@@ -1,47 +1,47 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
* child_rsc1:2 (ocf:heartbeat:apache): Stopped
* child_rsc1:3 (ocf:heartbeat:apache): Stopped
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Start child_rsc1:0 ( node1 )
* Start child_rsc1:1 ( node2 )
* Start child_rsc1:2 ( node1 )
* Start child_rsc1:3 ( node2 )
Executing Cluster Transition:
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:2 monitor on node2
* Resource action: child_rsc1:2 monitor on node1
* Resource action: child_rsc1:3 monitor on node2
* Resource action: child_rsc1:3 monitor on node1
* Resource action: child_rsc1:4 monitor on node2
* Resource action: child_rsc1:4 monitor on node1
* Pseudo action: rsc1_start_0
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Resource action: child_rsc1:2 start on node1
* Resource action: child_rsc1:3 start on node2
* Pseudo action: rsc1_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-1.summary b/cts/scheduler/summary/promoted-1.summary
index 161f51834a..08100f3e36 100644
--- a/cts/scheduler/summary/promoted-1.summary
+++ b/cts/scheduler/summary/promoted-1.summary
@@ -1,50 +1,50 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
* child_rsc1:2 (ocf:heartbeat:apache): Stopped
* child_rsc1:3 (ocf:heartbeat:apache): Stopped
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Start child_rsc1:0 ( node1 )
* Promote child_rsc1:1 ( Stopped -> Promoted node2 )
* Start child_rsc1:2 ( node1 )
* Start child_rsc1:3 ( node2 )
Executing Cluster Transition:
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:2 monitor on node2
* Resource action: child_rsc1:2 monitor on node1
* Resource action: child_rsc1:3 monitor on node2
* Resource action: child_rsc1:3 monitor on node1
* Resource action: child_rsc1:4 monitor on node2
* Resource action: child_rsc1:4 monitor on node1
* Pseudo action: rsc1_start_0
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Resource action: child_rsc1:2 start on node1
* Resource action: child_rsc1:3 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:1 promote on node2
* Pseudo action: rsc1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
* child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-10.summary b/cts/scheduler/summary/promoted-10.summary
index 54dbcd7e69..c35c61c793 100644
--- a/cts/scheduler/summary/promoted-10.summary
+++ b/cts/scheduler/summary/promoted-10.summary
@@ -1,75 +1,75 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
* child_rsc1:2 (ocf:heartbeat:apache): Stopped
* child_rsc1:3 (ocf:heartbeat:apache): Stopped
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Promote child_rsc1:0 ( Stopped -> Promoted node1 )
* Start child_rsc1:1 ( node2 )
* Start child_rsc1:2 ( node1 )
* Promote child_rsc1:3 ( Stopped -> Promoted node2 )
Executing Cluster Transition:
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:2 monitor on node2
* Resource action: child_rsc1:2 monitor on node1
* Resource action: child_rsc1:3 monitor on node2
* Resource action: child_rsc1:3 monitor on node1
* Resource action: child_rsc1:4 monitor on node2
* Resource action: child_rsc1:4 monitor on node1
* Pseudo action: rsc1_pre_notify_start_0
* Pseudo action: rsc1_confirmed-pre_notify_start_0
* Pseudo action: rsc1_start_0
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Resource action: child_rsc1:2 start on node1
* Resource action: child_rsc1:3 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_post_notify_running_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-post_notify_running_0
* Pseudo action: rsc1_pre_notify_promote_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-pre_notify_promote_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:0 promote on node1
* Resource action: child_rsc1:3 promote on node2
* Pseudo action: rsc1_promoted_0
* Pseudo action: rsc1_post_notify_promoted_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-post_notify_promoted_0
* Resource action: child_rsc1:0 monitor=11000 on node1
* Resource action: child_rsc1:1 monitor=1000 on node2
* Resource action: child_rsc1:2 monitor=1000 on node1
* Resource action: child_rsc1:3 monitor=11000 on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Promoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:3 (ocf:heartbeat:apache): Promoted node2
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-11.summary b/cts/scheduler/summary/promoted-11.summary
index 89fb85776d..47732fb9da 100644
--- a/cts/scheduler/summary/promoted-11.summary
+++ b/cts/scheduler/summary/promoted-11.summary
@@ -1,40 +1,40 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* simple-rsc (ocf:heartbeat:apache): Stopped
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Start simple-rsc ( node2 )
* Start child_rsc1:0 ( node1 )
* Promote child_rsc1:1 ( Stopped -> Promoted node2 )
Executing Cluster Transition:
* Resource action: simple-rsc monitor on node2
* Resource action: simple-rsc monitor on node1
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Pseudo action: rsc1_start_0
* Resource action: simple-rsc start on node2
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:1 promote on node2
* Pseudo action: rsc1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* simple-rsc (ocf:heartbeat:apache): Started node2
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
diff --git a/cts/scheduler/summary/promoted-12.summary b/cts/scheduler/summary/promoted-12.summary
index 878d366883..9125a9aa28 100644
--- a/cts/scheduler/summary/promoted-12.summary
+++ b/cts/scheduler/summary/promoted-12.summary
@@ -1,33 +1,33 @@
Current cluster status:
* Node List:
* Online: [ sel3 sel4 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ sel3 ]
* Unpromoted: [ sel4 ]
- * Clone Set: ms-sf [sf] (promotable) (unique):
+ * Clone Set: ms-sf [sf] (promotable, unique):
* sf:0 (ocf:heartbeat:Stateful): Unpromoted sel3
* sf:1 (ocf:heartbeat:Stateful): Unpromoted sel4
* fs0 (ocf:heartbeat:Filesystem): Started sel3
Transition Summary:
* Promote sf:0 ( Unpromoted -> Promoted sel3 )
Executing Cluster Transition:
* Pseudo action: ms-sf_promote_0
* Resource action: sf:0 promote on sel3
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ sel3 sel4 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ sel3 ]
* Unpromoted: [ sel4 ]
- * Clone Set: ms-sf [sf] (promotable) (unique):
+ * Clone Set: ms-sf [sf] (promotable, unique):
* sf:0 (ocf:heartbeat:Stateful): Promoted sel3
* sf:1 (ocf:heartbeat:Stateful): Unpromoted sel4
* fs0 (ocf:heartbeat:Filesystem): Started sel3
diff --git a/cts/scheduler/summary/promoted-2.summary b/cts/scheduler/summary/promoted-2.summary
index 3258499fc8..9adf43ef1d 100644
--- a/cts/scheduler/summary/promoted-2.summary
+++ b/cts/scheduler/summary/promoted-2.summary
@@ -1,71 +1,71 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
* child_rsc1:2 (ocf:heartbeat:apache): Stopped
* child_rsc1:3 (ocf:heartbeat:apache): Stopped
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Promote child_rsc1:0 ( Stopped -> Promoted node1 )
* Start child_rsc1:1 ( node2 )
* Start child_rsc1:2 ( node1 )
* Promote child_rsc1:3 ( Stopped -> Promoted node2 )
Executing Cluster Transition:
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:2 monitor on node2
* Resource action: child_rsc1:2 monitor on node1
* Resource action: child_rsc1:3 monitor on node2
* Resource action: child_rsc1:3 monitor on node1
* Resource action: child_rsc1:4 monitor on node2
* Resource action: child_rsc1:4 monitor on node1
* Pseudo action: rsc1_pre_notify_start_0
* Pseudo action: rsc1_confirmed-pre_notify_start_0
* Pseudo action: rsc1_start_0
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Resource action: child_rsc1:2 start on node1
* Resource action: child_rsc1:3 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_post_notify_running_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-post_notify_running_0
* Pseudo action: rsc1_pre_notify_promote_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-pre_notify_promote_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:0 promote on node1
* Resource action: child_rsc1:3 promote on node2
* Pseudo action: rsc1_promoted_0
* Pseudo action: rsc1_post_notify_promoted_0
* Resource action: child_rsc1:0 notify on node1
* Resource action: child_rsc1:1 notify on node2
* Resource action: child_rsc1:2 notify on node1
* Resource action: child_rsc1:3 notify on node2
* Pseudo action: rsc1_confirmed-post_notify_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Promoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:3 (ocf:heartbeat:apache): Promoted node2
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-3.summary b/cts/scheduler/summary/promoted-3.summary
index 161f51834a..08100f3e36 100644
--- a/cts/scheduler/summary/promoted-3.summary
+++ b/cts/scheduler/summary/promoted-3.summary
@@ -1,50 +1,50 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Stopped
* child_rsc1:1 (ocf:heartbeat:apache): Stopped
* child_rsc1:2 (ocf:heartbeat:apache): Stopped
* child_rsc1:3 (ocf:heartbeat:apache): Stopped
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
Transition Summary:
* Start child_rsc1:0 ( node1 )
* Promote child_rsc1:1 ( Stopped -> Promoted node2 )
* Start child_rsc1:2 ( node1 )
* Start child_rsc1:3 ( node2 )
Executing Cluster Transition:
* Resource action: child_rsc1:0 monitor on node2
* Resource action: child_rsc1:0 monitor on node1
* Resource action: child_rsc1:1 monitor on node2
* Resource action: child_rsc1:1 monitor on node1
* Resource action: child_rsc1:2 monitor on node2
* Resource action: child_rsc1:2 monitor on node1
* Resource action: child_rsc1:3 monitor on node2
* Resource action: child_rsc1:3 monitor on node1
* Resource action: child_rsc1:4 monitor on node2
* Resource action: child_rsc1:4 monitor on node1
* Pseudo action: rsc1_start_0
* Resource action: child_rsc1:0 start on node1
* Resource action: child_rsc1:1 start on node2
* Resource action: child_rsc1:2 start on node1
* Resource action: child_rsc1:3 start on node2
* Pseudo action: rsc1_running_0
* Pseudo action: rsc1_promote_0
* Resource action: child_rsc1:1 promote on node2
* Pseudo action: rsc1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: rsc1 [child_rsc1] (promotable) (unique):
+ * Clone Set: rsc1 [child_rsc1] (promotable, unique):
* child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:1 (ocf:heartbeat:apache): Promoted node2
* child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1
* child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2
* child_rsc1:4 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/promoted-4.summary b/cts/scheduler/summary/promoted-4.summary
index 0dfe7c7263..2bcb25eaf1 100644
--- a/cts/scheduler/summary/promoted-4.summary
+++ b/cts/scheduler/summary/promoted-4.summary
@@ -1,94 +1,94 @@
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_child (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n01
* child_DoFencing:3 (stonith:ssh): Started c001n02
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
Transition Summary:
* Promote ocf_msdummy:0 ( Unpromoted -> Promoted c001n08 )
Executing Cluster Transition:
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n02
* Resource action: child_DoFencing:1 monitor on c001n01
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n01
* Resource action: ocf_msdummy:0 cancel=5000 on c001n08
* Resource action: ocf_msdummy:2 monitor on c001n08
* Resource action: ocf_msdummy:2 monitor on c001n03
* Resource action: ocf_msdummy:2 monitor on c001n02
* Resource action: ocf_msdummy:3 monitor on c001n03
* Resource action: ocf_msdummy:3 monitor on c001n02
* Resource action: ocf_msdummy:3 monitor on c001n01
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:4 monitor on c001n01
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n03
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:6 monitor on c001n01
* Resource action: ocf_msdummy:7 monitor on c001n08
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n01
* Pseudo action: master_rsc_1_promote_0
* Resource action: ocf_msdummy:0 promote on c001n08
* Pseudo action: master_rsc_1_promoted_0
* Resource action: ocf_msdummy:0 monitor=6000 on c001n08
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_child (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n01
* child_DoFencing:3 (stonith:ssh): Started c001n02
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
diff --git a/cts/scheduler/summary/promoted-5.summary b/cts/scheduler/summary/promoted-5.summary
index 00fa1c2154..8a2f1a232f 100644
--- a/cts/scheduler/summary/promoted-5.summary
+++ b/cts/scheduler/summary/promoted-5.summary
@@ -1,88 +1,88 @@
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_child (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n01
* child_DoFencing:3 (stonith:ssh): Started c001n02
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
Transition Summary:
Executing Cluster Transition:
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n02
* Resource action: child_DoFencing:1 monitor on c001n01
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n01
* Resource action: ocf_msdummy:2 monitor on c001n08
* Resource action: ocf_msdummy:2 monitor on c001n03
* Resource action: ocf_msdummy:2 monitor on c001n02
* Resource action: ocf_msdummy:3 monitor on c001n03
* Resource action: ocf_msdummy:3 monitor on c001n02
* Resource action: ocf_msdummy:3 monitor on c001n01
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:4 monitor on c001n01
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n03
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:6 monitor on c001n01
* Resource action: ocf_msdummy:7 monitor on c001n08
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n01
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_child (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n01
* child_DoFencing:3 (stonith:ssh): Started c001n02
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
diff --git a/cts/scheduler/summary/promoted-6.summary b/cts/scheduler/summary/promoted-6.summary
index 13c12dfc1c..2d9c953bfa 100644
--- a/cts/scheduler/summary/promoted-6.summary
+++ b/cts/scheduler/summary/promoted-6.summary
@@ -1,87 +1,87 @@
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n02
* child_DoFencing:2 (stonith:ssh): Started c001n03
* child_DoFencing:3 (stonith:ssh): Started c001n01
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
Transition Summary:
Executing Cluster Transition:
* Resource action: child_DoFencing:1 monitor on c001n08
* Resource action: child_DoFencing:1 monitor on c001n03
* Resource action: child_DoFencing:1 monitor on c001n01
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n01
* Resource action: child_DoFencing:3 monitor on c001n08
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: ocf_msdummy:2 monitor on c001n08
* Resource action: ocf_msdummy:2 monitor on c001n01
* Resource action: ocf_msdummy:3 monitor on c001n03
* Resource action: ocf_msdummy:3 monitor on c001n01
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n03
* Resource action: ocf_msdummy:4 monitor on c001n01
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:5 monitor on c001n01
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:6 monitor on c001n02
* Resource action: ocf_msdummy:7 monitor on c001n08
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n08
* child_DoFencing:1 (stonith:ssh): Started c001n02
* child_DoFencing:2 (stonith:ssh): Started c001n03
* child_DoFencing:3 (stonith:ssh): Started c001n01
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01
diff --git a/cts/scheduler/summary/promoted-7.summary b/cts/scheduler/summary/promoted-7.summary
index 0602f95895..e43682c9d4 100644
--- a/cts/scheduler/summary/promoted-7.summary
+++ b/cts/scheduler/summary/promoted-7.summary
@@ -1,121 +1,121 @@
Current cluster status:
* Node List:
* Node c001n01: UNCLEAN (offline)
* Online: [ c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n02
* child_DoFencing:3 (stonith:ssh): Started c001n08
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN)
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 (UNCLEAN)
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
Transition Summary:
* Fence (reboot) c001n01 'peer is no longer part of the cluster'
* Move DcIPaddr ( c001n01 -> c001n03 )
* Move ocf_192.168.100.181 ( c001n03 -> c001n02 )
* Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 )
* Move ocf_192.168.100.183 ( c001n03 -> c001n02 )
* Move lsb_dummy ( c001n02 -> c001n08 )
* Move rsc_c001n01 ( c001n01 -> c001n03 )
* Stop child_DoFencing:0 ( c001n01 ) due to node availability
* Stop ocf_msdummy:0 ( Promoted c001n01 ) due to node availability
* Stop ocf_msdummy:4 ( Unpromoted c001n01 ) due to node availability
Executing Cluster Transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n03
* Resource action: lsb_dummy stop on c001n02
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Pseudo action: DoFencing_stop_0
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n03
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n02
* Pseudo action: master_rsc_1_demote_0
* Fencing c001n01 (reboot)
* Pseudo action: DcIPaddr_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n03
* Resource action: lsb_dummy start on c001n08
* Pseudo action: rsc_c001n01_stop_0
* Pseudo action: child_DoFencing:0_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: ocf_msdummy:0_demote_0
* Pseudo action: master_rsc_1_demoted_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: DcIPaddr start on c001n03
* Resource action: ocf_192.168.100.181 stop on c001n03
* Resource action: lsb_dummy monitor=5000 on c001n08
* Resource action: rsc_c001n01 start on c001n03
* Pseudo action: ocf_msdummy:0_stop_0
* Pseudo action: ocf_msdummy:4_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Resource action: DcIPaddr monitor=5000 on c001n03
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Resource action: rsc_c001n01 monitor=5000 on c001n03
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n03 c001n08 ]
* OFFLINE: [ c001n01 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Stopped
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n02
* child_DoFencing:3 (stonith:ssh): Started c001n08
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-8.summary b/cts/scheduler/summary/promoted-8.summary
index 32417ff1ea..571eba6945 100644
--- a/cts/scheduler/summary/promoted-8.summary
+++ b/cts/scheduler/summary/promoted-8.summary
@@ -1,124 +1,124 @@
Current cluster status:
* Node List:
* Node c001n01: UNCLEAN (offline)
* Online: [ c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN)
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN)
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n02
* child_DoFencing:3 (stonith:ssh): Started c001n08
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN)
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
Transition Summary:
* Fence (reboot) c001n01 'peer is no longer part of the cluster'
* Move DcIPaddr ( c001n01 -> c001n03 )
* Move ocf_192.168.100.181 ( c001n03 -> c001n02 )
* Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 )
* Move ocf_192.168.100.183 ( c001n03 -> c001n02 )
* Move lsb_dummy ( c001n02 -> c001n08 )
* Move rsc_c001n01 ( c001n01 -> c001n03 )
* Stop child_DoFencing:0 ( c001n01 ) due to node availability
* Move ocf_msdummy:0 ( Promoted c001n01 -> Unpromoted c001n03 )
Executing Cluster Transition:
* Pseudo action: group-1_stop_0
* Resource action: ocf_192.168.100.183 stop on c001n03
* Resource action: lsb_dummy stop on c001n02
* Resource action: child_DoFencing:2 monitor on c001n08
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Pseudo action: DoFencing_stop_0
* Resource action: ocf_msdummy:4 monitor on c001n08
* Resource action: ocf_msdummy:4 monitor on c001n03
* Resource action: ocf_msdummy:4 monitor on c001n02
* Resource action: ocf_msdummy:5 monitor on c001n08
* Resource action: ocf_msdummy:5 monitor on c001n03
* Resource action: ocf_msdummy:5 monitor on c001n02
* Resource action: ocf_msdummy:6 monitor on c001n08
* Resource action: ocf_msdummy:6 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n03
* Resource action: ocf_msdummy:7 monitor on c001n02
* Pseudo action: master_rsc_1_demote_0
* Fencing c001n01 (reboot)
* Pseudo action: DcIPaddr_stop_0
* Resource action: heartbeat_192.168.100.182 stop on c001n03
* Resource action: lsb_dummy start on c001n08
* Pseudo action: rsc_c001n01_stop_0
* Pseudo action: child_DoFencing:0_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: ocf_msdummy:0_demote_0
* Pseudo action: master_rsc_1_demoted_0
* Pseudo action: master_rsc_1_stop_0
* Resource action: DcIPaddr start on c001n03
* Resource action: ocf_192.168.100.181 stop on c001n03
* Resource action: lsb_dummy monitor=5000 on c001n08
* Resource action: rsc_c001n01 start on c001n03
* Pseudo action: ocf_msdummy:0_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: master_rsc_1_start_0
* Resource action: DcIPaddr monitor=5000 on c001n03
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Resource action: rsc_c001n01 monitor=5000 on c001n03
* Resource action: ocf_msdummy:0 start on c001n03
* Pseudo action: master_rsc_1_running_0
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
* Resource action: ocf_msdummy:0 monitor=5000 on c001n03
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n03 c001n08 ]
* OFFLINE: [ c001n01 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Stopped
* child_DoFencing:1 (stonith:ssh): Started c001n03
* child_DoFencing:2 (stonith:ssh): Started c001n02
* child_DoFencing:3 (stonith:ssh): Started c001n08
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
diff --git a/cts/scheduler/summary/promoted-9.summary b/cts/scheduler/summary/promoted-9.summary
index 2da56a62db..7dfdbbda99 100644
--- a/cts/scheduler/summary/promoted-9.summary
+++ b/cts/scheduler/summary/promoted-9.summary
@@ -1,100 +1,100 @@
Current cluster status:
* Node List:
* Node sgi2: UNCLEAN (offline)
* Node test02: UNCLEAN (offline)
* Online: [ ibm1 va1 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped
* heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped
* ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
* rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped
* rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped
* rsc_va1 (ocf:heartbeat:IPaddr): Stopped
* rsc_test02 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started va1
* child_DoFencing:1 (stonith:ssh): Started ibm1
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
Transition Summary:
* Start DcIPaddr ( va1 ) due to no quorum (blocked)
* Start ocf_127.0.0.11 ( va1 ) due to no quorum (blocked)
* Start heartbeat_127.0.0.12 ( va1 ) due to no quorum (blocked)
* Start ocf_127.0.0.13 ( va1 ) due to no quorum (blocked)
* Start lsb_dummy ( va1 ) due to no quorum (blocked)
* Start rsc_sgi2 ( va1 ) due to no quorum (blocked)
* Start rsc_ibm1 ( va1 ) due to no quorum (blocked)
* Start rsc_va1 ( va1 ) due to no quorum (blocked)
* Start rsc_test02 ( va1 ) due to no quorum (blocked)
* Stop child_DoFencing:1 ( ibm1 ) due to node availability
* Promote ocf_msdummy:0 ( Stopped -> Promoted va1 ) blocked
* Start ocf_msdummy:1 ( va1 ) due to no quorum (blocked)
Executing Cluster Transition:
* Resource action: child_DoFencing:1 monitor on va1
* Resource action: child_DoFencing:2 monitor on va1
* Resource action: child_DoFencing:2 monitor on ibm1
* Resource action: child_DoFencing:3 monitor on va1
* Resource action: child_DoFencing:3 monitor on ibm1
* Pseudo action: DoFencing_stop_0
* Resource action: ocf_msdummy:2 monitor on va1
* Resource action: ocf_msdummy:2 monitor on ibm1
* Resource action: ocf_msdummy:3 monitor on va1
* Resource action: ocf_msdummy:3 monitor on ibm1
* Resource action: ocf_msdummy:4 monitor on va1
* Resource action: ocf_msdummy:4 monitor on ibm1
* Resource action: ocf_msdummy:5 monitor on va1
* Resource action: ocf_msdummy:5 monitor on ibm1
* Resource action: ocf_msdummy:6 monitor on va1
* Resource action: ocf_msdummy:6 monitor on ibm1
* Resource action: ocf_msdummy:7 monitor on va1
* Resource action: ocf_msdummy:7 monitor on ibm1
* Resource action: child_DoFencing:1 stop on ibm1
* Pseudo action: DoFencing_stopped_0
* Cluster action: do_shutdown on ibm1
Revised Cluster Status:
* Node List:
* Node sgi2: UNCLEAN (offline)
* Node test02: UNCLEAN (offline)
* Online: [ ibm1 va1 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped
* heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped
* ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
* rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped
* rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped
* rsc_va1 (ocf:heartbeat:IPaddr): Stopped
* rsc_test02 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started va1
* child_DoFencing:1 (stonith:ssh): Stopped
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
diff --git a/cts/scheduler/summary/promoted-asymmetrical-order.summary b/cts/scheduler/summary/promoted-asymmetrical-order.summary
index e10568e898..1e49b3084b 100644
--- a/cts/scheduler/summary/promoted-asymmetrical-order.summary
+++ b/cts/scheduler/summary/promoted-asymmetrical-order.summary
@@ -1,37 +1,37 @@
2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: ms1 [rsc1] (promotable) (disabled):
+ * Clone Set: ms1 [rsc1] (promotable, disabled):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
* Clone Set: ms2 [rsc2] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
* Stop rsc1:1 ( Unpromoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:0 demote on node1
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Resource action: rsc1:0 stop on node1
* Resource action: rsc1:1 stop on node2
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Clone Set: ms1 [rsc1] (promotable) (disabled):
+ * Clone Set: ms1 [rsc1] (promotable, disabled):
* Stopped (disabled): [ node1 node2 ]
* Clone Set: ms2 [rsc2] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
diff --git a/cts/scheduler/summary/promoted-failed-demote-2.summary b/cts/scheduler/summary/promoted-failed-demote-2.summary
index c8504e9e1d..453b5b7c9b 100644
--- a/cts/scheduler/summary/promoted-failed-demote-2.summary
+++ b/cts/scheduler/summary/promoted-failed-demote-2.summary
@@ -1,47 +1,47 @@
Current cluster status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
* Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a )
* Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a )
Executing Cluster Transition:
* Resource action: stateful-1:1 cancel=20000 on dl380g5a
* Resource action: stateful-2:1 cancel=20000 on dl380g5a
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-1:0 stop on dl380g5b
* Pseudo action: group:0_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-1:1 promote on dl380g5a
* Resource action: stateful-2:1 promote on dl380g5a
* Pseudo action: group:1_promoted_0
* Resource action: stateful-1:1 monitor=10000 on dl380g5a
* Resource action: stateful-2:1 monitor=10000 on dl380g5a
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Stopped
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-failed-demote.summary b/cts/scheduler/summary/promoted-failed-demote.summary
index f071025528..732fba89c7 100644
--- a/cts/scheduler/summary/promoted-failed-demote.summary
+++ b/cts/scheduler/summary/promoted-failed-demote.summary
@@ -1,64 +1,64 @@
Current cluster status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
* Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a )
* Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a )
Executing Cluster Transition:
* Resource action: stateful-1:1 cancel=20000 on dl380g5a
* Resource action: stateful-2:1 cancel=20000 on dl380g5a
* Pseudo action: ms-sf_pre_notify_stop_0
* Resource action: stateful-1:0 notify on dl380g5b
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-pre_notify_stop_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-1:0 stop on dl380g5b
* Pseudo action: group:0_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_post_notify_stopped_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-post_notify_stopped_0
* Pseudo action: ms-sf_pre_notify_promote_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-pre_notify_promote_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-1:1 promote on dl380g5a
* Resource action: stateful-2:1 promote on dl380g5a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
* Pseudo action: ms-sf_post_notify_promoted_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-post_notify_promoted_0
* Resource action: stateful-1:1 monitor=10000 on dl380g5a
* Resource action: stateful-2:1 monitor=10000 on dl380g5a
Revised Cluster Status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
- * Clone Set: ms-sf [group] (promotable) (unique):
+ * Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Stopped
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-group.summary b/cts/scheduler/summary/promoted-group.summary
index f06047c34f..594d1b50d4 100644
--- a/cts/scheduler/summary/promoted-group.summary
+++ b/cts/scheduler/summary/promoted-group.summary
@@ -1,37 +1,37 @@
Current cluster status:
* Node List:
* Online: [ rh44-1 rh44-2 ]
* Full List of Resources:
* Resource Group: test:
* resource_1 (ocf:heartbeat:IPaddr): Started rh44-1
- * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique):
+ * Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
* Resource Group: grp_ms_sf:0:
* master_slave_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2
* Resource Group: grp_ms_sf:1:
* master_slave_Stateful:1 (ocf:heartbeat:Stateful): Unpromoted rh44-1
Transition Summary:
* Promote master_slave_Stateful:1 ( Unpromoted -> Promoted rh44-1 )
Executing Cluster Transition:
* Resource action: master_slave_Stateful:1 cancel=5000 on rh44-1
* Pseudo action: ms-sf_promote_0
* Pseudo action: grp_ms_sf:1_promote_0
* Resource action: master_slave_Stateful:1 promote on rh44-1
* Pseudo action: grp_ms_sf:1_promoted_0
* Resource action: master_slave_Stateful:1 monitor=6000 on rh44-1
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ rh44-1 rh44-2 ]
* Full List of Resources:
* Resource Group: test:
* resource_1 (ocf:heartbeat:IPaddr): Started rh44-1
- * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique):
+ * Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
* Resource Group: grp_ms_sf:0:
* master_slave_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2
* Resource Group: grp_ms_sf:1:
* master_slave_Stateful:1 (ocf:heartbeat:Stateful): Promoted rh44-1
diff --git a/cts/scheduler/summary/promoted-reattach.summary b/cts/scheduler/summary/promoted-reattach.summary
index cf089d9324..8f07251f2e 100644
--- a/cts/scheduler/summary/promoted-reattach.summary
+++ b/cts/scheduler/summary/promoted-reattach.summary
@@ -1,34 +1,34 @@
Current cluster status:
* Node List:
* Online: [ dktest1 dktest2 ]
* Full List of Resources:
- * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged):
+ * Clone Set: ms-drbd1 [drbd1] (promotable, unmanaged):
* drbd1 (ocf:heartbeat:drbd): Promoted dktest1 (unmanaged)
* drbd1 (ocf:heartbeat:drbd): Unpromoted dktest2 (unmanaged)
* Resource Group: apache (unmanaged):
* apache-vip (ocf:heartbeat:IPaddr2): Started dktest1 (unmanaged)
* mount (ocf:heartbeat:Filesystem): Started dktest1 (unmanaged)
* webserver (ocf:heartbeat:apache): Started dktest1 (unmanaged)
Transition Summary:
Executing Cluster Transition:
* Resource action: drbd1:0 monitor=10000 on dktest1
* Resource action: drbd1:0 monitor=11000 on dktest2
* Resource action: apache-vip monitor=60000 on dktest1
* Resource action: mount monitor=10000 on dktest1
* Resource action: webserver monitor=30000 on dktest1
Revised Cluster Status:
* Node List:
* Online: [ dktest1 dktest2 ]
* Full List of Resources:
- * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged):
+ * Clone Set: ms-drbd1 [drbd1] (promotable, unmanaged):
* drbd1 (ocf:heartbeat:drbd): Promoted dktest1 (unmanaged)
* drbd1 (ocf:heartbeat:drbd): Unpromoted dktest2 (unmanaged)
* Resource Group: apache (unmanaged):
* apache-vip (ocf:heartbeat:IPaddr2): Started dktest1 (unmanaged)
* mount (ocf:heartbeat:Filesystem): Started dktest1 (unmanaged)
* webserver (ocf:heartbeat:apache): Started dktest1 (unmanaged)
diff --git a/cts/scheduler/summary/promoted-unmanaged-monitor.summary b/cts/scheduler/summary/promoted-unmanaged-monitor.summary
index 2b96429fad..3c5b39aa17 100644
--- a/cts/scheduler/summary/promoted-unmanaged-monitor.summary
+++ b/cts/scheduler/summary/promoted-unmanaged-monitor.summary
@@ -1,69 +1,69 @@
Current cluster status:
* Node List:
* Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Full List of Resources:
* Clone Set: Fencing [FencingChild] (unmanaged):
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Resource Group: group-1 (unmanaged):
* r192.168.122.112 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* r192.168.122.113 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* r192.168.122.114 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged)
* rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged)
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-3 (unmanaged)
* migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged)
* Clone Set: Connectivity [ping-1] (unmanaged):
* ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-3 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-4 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged)
- * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+ * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-2 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-3 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-4 (unmanaged)
* Stopped: [ pcmk-1 ]
Transition Summary:
Executing Cluster Transition:
* Resource action: lsb-dummy monitor=5000 on pcmk-3
* Resource action: migrator monitor=10000 on pcmk-4
* Resource action: ping-1:0 monitor=60000 on pcmk-2
* Resource action: ping-1:0 monitor=60000 on pcmk-3
* Resource action: ping-1:0 monitor=60000 on pcmk-4
* Resource action: ping-1:0 monitor=60000 on pcmk-1
* Resource action: stateful-1:0 monitor=15000 on pcmk-2
* Resource action: stateful-1:0 monitor on pcmk-1
* Resource action: stateful-1:0 monitor=16000 on pcmk-3
* Resource action: stateful-1:0 monitor=15000 on pcmk-4
Revised Cluster Status:
* Node List:
* Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Full List of Resources:
* Clone Set: Fencing [FencingChild] (unmanaged):
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Resource Group: group-1 (unmanaged):
* r192.168.122.112 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* r192.168.122.113 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* r192.168.122.114 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged)
* rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged)
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-3 (unmanaged)
* migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged)
* Clone Set: Connectivity [ping-1] (unmanaged):
* ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-3 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-4 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged)
- * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+ * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-2 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-3 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-4 (unmanaged)
* Stopped: [ pcmk-1 ]
diff --git a/cts/scheduler/summary/rec-node-13.summary b/cts/scheduler/summary/rec-node-13.summary
index 68210542c3..72c8e42736 100644
--- a/cts/scheduler/summary/rec-node-13.summary
+++ b/cts/scheduler/summary/rec-node-13.summary
@@ -1,80 +1,80 @@
Current cluster status:
* Node List:
* Node c001n04: UNCLEAN (online)
* Online: [ c001n02 c001n06 c001n07 ]
* OFFLINE: [ c001n03 c001n05 ]
* Full List of Resources:
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n02 c001n06 c001n07 ]
* Stopped: [ c001n03 c001n04 c001n05 ]
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
Transition Summary:
* Fence (reboot) c001n04 'ocf_msdummy:6 failed there'
* Stop ocf_msdummy:6 ( Unpromoted c001n04 ) due to node availability
Executing Cluster Transition:
* Fencing c001n04 (reboot)
* Pseudo action: master_rsc_1_stop_0
* Pseudo action: ocf_msdummy:6_stop_0
* Pseudo action: master_rsc_1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n06 c001n07 ]
* OFFLINE: [ c001n03 c001n04 c001n05 ]
* Full List of Resources:
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n02 c001n06 c001n07 ]
* Stopped: [ c001n03 c001n04 c001n05 ]
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
diff --git a/cts/scheduler/summary/rsc-maintenance.summary b/cts/scheduler/summary/rsc-maintenance.summary
index fed1d61e19..0b9d57ed2a 100644
--- a/cts/scheduler/summary/rsc-maintenance.summary
+++ b/cts/scheduler/summary/rsc-maintenance.summary
@@ -1,31 +1,31 @@
2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Resource Group: group1 (unmanaged) (disabled):
+ * Resource Group: group1 (unmanaged, disabled):
* rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged)
* rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged)
* Resource Group: group2:
* rsc3 (ocf:pacemaker:Dummy): Started node2
* rsc4 (ocf:pacemaker:Dummy): Started node2
Transition Summary:
Executing Cluster Transition:
* Resource action: rsc1 cancel=10000 on node1
* Resource action: rsc2 cancel=10000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
- * Resource Group: group1 (unmanaged) (disabled):
+ * Resource Group: group1 (unmanaged, disabled):
* rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged)
* rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged)
* Resource Group: group2:
* rsc3 (ocf:pacemaker:Dummy): Started node2
* rsc4 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/stonith-0.summary b/cts/scheduler/summary/stonith-0.summary
index 5b829bf06d..f9745bd642 100644
--- a/cts/scheduler/summary/stonith-0.summary
+++ b/cts/scheduler/summary/stonith-0.summary
@@ -1,111 +1,111 @@
Current cluster status:
* Node List:
* Node c001n03: UNCLEAN (online)
* Node c001n05: UNCLEAN (online)
* Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started [ c001n03 c001n05 ]
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): FAILED [ c001n03 c001n05 ]
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
* Stopped: [ c001n03 c001n05 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04
* ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04
* ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
Transition Summary:
* Fence (reboot) c001n05 'ocf_192.168.100.183 failed there'
* Fence (reboot) c001n03 'ocf_192.168.100.183 failed there'
* Move ocf_192.168.100.181 ( c001n03 -> c001n02 )
* Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 )
* Recover ocf_192.168.100.183 ( c001n03 -> c001n02 )
* Move rsc_c001n05 ( c001n05 -> c001n07 )
* Move rsc_c001n07 ( c001n03 -> c001n07 )
Executing Cluster Transition:
* Resource action: child_DoFencing:4 monitor=20000 on c001n08
* Fencing c001n05 (reboot)
* Fencing c001n03 (reboot)
* Pseudo action: group-1_stop_0
* Pseudo action: ocf_192.168.100.183_stop_0
* Pseudo action: ocf_192.168.100.183_stop_0
* Pseudo action: rsc_c001n05_stop_0
* Pseudo action: rsc_c001n07_stop_0
* Pseudo action: heartbeat_192.168.100.182_stop_0
* Resource action: rsc_c001n05 start on c001n07
* Resource action: rsc_c001n07 start on c001n07
* Pseudo action: ocf_192.168.100.181_stop_0
* Pseudo action: ocf_192.168.100.181_stop_0
* Resource action: rsc_c001n05 monitor=5000 on c001n07
* Resource action: rsc_c001n07 monitor=5000 on c001n07
* Pseudo action: group-1_stopped_0
* Pseudo action: group-1_start_0
* Resource action: ocf_192.168.100.181 start on c001n02
* Resource action: heartbeat_192.168.100.182 start on c001n02
* Resource action: ocf_192.168.100.183 start on c001n02
* Pseudo action: group-1_running_0
* Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
* Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
* Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
* OFFLINE: [ c001n03 c001n05 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02
* heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02
* ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02
* lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04
* rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06
* rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
* Stopped: [ c001n03 c001n05 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04
* ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04
* ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
* ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06
diff --git a/cts/scheduler/summary/stonith-1.summary b/cts/scheduler/summary/stonith-1.summary
index 31d2f57ce8..29b979cacc 100644
--- a/cts/scheduler/summary/stonith-1.summary
+++ b/cts/scheduler/summary/stonith-1.summary
@@ -1,113 +1,113 @@
Current cluster status:
* Node List:
* Node sles-3: UNCLEAN (offline)
* Online: [ sles-1 sles-2 sles-4 ]
* Full List of Resources:
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2
* migrator (ocf:heartbeat:Dummy): Started sles-3 (UNCLEAN)
* rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1
* rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2
* rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 (UNCLEAN)
* rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4
* Clone Set: DoFencing [child_DoFencing]:
* child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN)
* Started: [ sles-1 sles-2 ]
* Stopped: [ sles-4 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN)
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN)
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
Transition Summary:
* Fence (reboot) sles-3 'peer is no longer part of the cluster'
* Start r192.168.100.183 ( sles-1 )
* Move migrator ( sles-3 -> sles-4 )
* Move rsc_sles-3 ( sles-3 -> sles-4 )
* Move child_DoFencing:2 ( sles-3 -> sles-4 )
* Start ocf_msdummy:0 ( sles-4 )
* Start ocf_msdummy:1 ( sles-1 )
* Move ocf_msdummy:2 ( sles-3 -> sles-2 Unpromoted )
* Start ocf_msdummy:3 ( sles-4 )
* Start ocf_msdummy:4 ( sles-1 )
* Move ocf_msdummy:5 ( sles-3 -> sles-2 Unpromoted )
Executing Cluster Transition:
* Pseudo action: group-1_start_0
* Resource action: r192.168.100.182 monitor=5000 on sles-1
* Resource action: lsb_dummy monitor=5000 on sles-2
* Resource action: rsc_sles-2 monitor=5000 on sles-2
* Resource action: rsc_sles-4 monitor=5000 on sles-4
* Pseudo action: DoFencing_stop_0
* Fencing sles-3 (reboot)
* Resource action: r192.168.100.183 start on sles-1
* Pseudo action: migrator_stop_0
* Pseudo action: rsc_sles-3_stop_0
* Pseudo action: child_DoFencing:2_stop_0
* Pseudo action: DoFencing_stopped_0
* Pseudo action: DoFencing_start_0
* Pseudo action: master_rsc_1_stop_0
* Pseudo action: group-1_running_0
* Resource action: r192.168.100.183 monitor=5000 on sles-1
* Resource action: migrator start on sles-4
* Resource action: rsc_sles-3 start on sles-4
* Resource action: child_DoFencing:2 start on sles-4
* Pseudo action: DoFencing_running_0
* Pseudo action: ocf_msdummy:2_stop_0
* Pseudo action: ocf_msdummy:5_stop_0
* Pseudo action: master_rsc_1_stopped_0
* Pseudo action: master_rsc_1_start_0
* Resource action: migrator monitor=10000 on sles-4
* Resource action: rsc_sles-3 monitor=5000 on sles-4
* Resource action: child_DoFencing:2 monitor=60000 on sles-4
* Resource action: ocf_msdummy:0 start on sles-4
* Resource action: ocf_msdummy:1 start on sles-1
* Resource action: ocf_msdummy:2 start on sles-2
* Resource action: ocf_msdummy:3 start on sles-4
* Resource action: ocf_msdummy:4 start on sles-1
* Resource action: ocf_msdummy:5 start on sles-2
* Pseudo action: master_rsc_1_running_0
* Resource action: ocf_msdummy:0 monitor=5000 on sles-4
* Resource action: ocf_msdummy:1 monitor=5000 on sles-1
* Resource action: ocf_msdummy:2 monitor=5000 on sles-2
* Resource action: ocf_msdummy:3 monitor=5000 on sles-4
* Resource action: ocf_msdummy:4 monitor=5000 on sles-1
* Resource action: ocf_msdummy:5 monitor=5000 on sles-2
Revised Cluster Status:
* Node List:
* Online: [ sles-1 sles-2 sles-4 ]
* OFFLINE: [ sles-3 ]
* Full List of Resources:
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2
* migrator (ocf:heartbeat:Dummy): Started sles-4
* rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1
* rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2
* rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-4
* rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ sles-1 sles-2 sles-4 ]
* Stopped: [ sles-3 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
diff --git a/cts/scheduler/summary/stonith-2.summary b/cts/scheduler/summary/stonith-2.summary
index 9fd7c65e59..c6f657193b 100644
--- a/cts/scheduler/summary/stonith-2.summary
+++ b/cts/scheduler/summary/stonith-2.summary
@@ -1,78 +1,78 @@
Current cluster status:
* Node List:
* Node sles-5: UNCLEAN (offline)
* Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
* Full List of Resources:
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1
* lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2
* migrator (ocf:heartbeat:Dummy): Started sles-3
* rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1
* rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2
* rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3
* rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4
* rsc_sles-5 (ocf:heartbeat:IPaddr): Stopped
* rsc_sles-6 (ocf:heartbeat:IPaddr): Started sles-6
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
* Stopped: [ sles-5 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-3
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted sles-6
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted sles-6
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted sles-3
Transition Summary:
* Fence (reboot) sles-5 'peer is no longer part of the cluster'
* Start rsc_sles-5 ( sles-6 )
Executing Cluster Transition:
* Fencing sles-5 (reboot)
* Resource action: rsc_sles-5 start on sles-6
* Resource action: rsc_sles-5 monitor=5000 on sles-6
Revised Cluster Status:
* Node List:
* Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
* OFFLINE: [ sles-5 ]
* Full List of Resources:
* Resource Group: group-1:
* r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1
* r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1
* lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2
* migrator (ocf:heartbeat:Dummy): Started sles-3
* rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1
* rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2
* rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3
* rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4
* rsc_sles-5 (ocf:heartbeat:IPaddr): Started sles-6
* rsc_sles-6 (ocf:heartbeat:IPaddr): Started sles-6
* Clone Set: DoFencing [child_DoFencing]:
* Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
* Stopped: [ sles-5 ]
- * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique):
+ * Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-3
* ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-4
* ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-1
* ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped
* ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted sles-6
* ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted sles-6
* ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted sles-2
* ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted sles-3
diff --git a/cts/scheduler/summary/unmanaged-promoted.summary b/cts/scheduler/summary/unmanaged-promoted.summary
index bdaac99618..33c0a4127f 100644
--- a/cts/scheduler/summary/unmanaged-promoted.summary
+++ b/cts/scheduler/summary/unmanaged-promoted.summary
@@ -1,63 +1,63 @@
Current cluster status:
* Node List:
* Online: [ pcmk-1 pcmk-2 ]
* OFFLINE: [ pcmk-3 pcmk-4 ]
* Full List of Resources:
* Clone Set: Fencing [FencingChild] (unmanaged):
* FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged)
* FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
* Resource Group: group-1 (unmanaged):
* r192.168.122.126 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* r192.168.122.127 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* r192.168.122.128 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged)
* rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged)
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged)
* migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged)
* Clone Set: Connectivity [ping-1] (unmanaged):
* ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
- * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+ * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
* stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-2 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
Transition Summary:
Executing Cluster Transition:
* Cluster action: do_shutdown on pcmk-2
* Cluster action: do_shutdown on pcmk-1
Revised Cluster Status:
* Node List:
* Online: [ pcmk-1 pcmk-2 ]
* OFFLINE: [ pcmk-3 pcmk-4 ]
* Full List of Resources:
* Clone Set: Fencing [FencingChild] (unmanaged):
* FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged)
* FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
* Resource Group: group-1 (unmanaged):
* r192.168.122.126 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* r192.168.122.127 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* r192.168.122.128 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged)
* rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged)
* rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged)
* lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged)
* migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged)
* Clone Set: Connectivity [ping-1] (unmanaged):
* ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged)
* ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
- * Clone Set: master-1 [stateful-1] (promotable) (unmanaged):
+ * Clone Set: master-1 [stateful-1] (promotable, unmanaged):
* stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-2 (unmanaged)
* stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-1 (unmanaged)
* Stopped: [ pcmk-3 pcmk-4 ]
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 1653dcc806..6323692b6f 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1007 +1,1043 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#define VARIANT_CLONE 1
#include "./variant.h"
#ifdef PCMK__COMPAT_2_0
#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s"
#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
#else
#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S
#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
#endif
+/*!
+ * \internal
+ * \brief Print the heading line for a clone resource's instance list
+ *
+ * Builds a comma-separated list of the clone's notable attributes
+ * (promotable, unique, unmanaged, disabled) and emits a list header of the
+ * form "Clone Set: <id> [<child-id>] (<attrs>)", omitting the parenthesized
+ * part entirely when none of the attributes apply.
+ *
+ * \param[in,out] out         Output object to print the header with
+ * \param[in,out] rc          Current output return code, updated by the
+ *                            PCMK__OUTPUT_LIST_HEADER macro
+ * \param[in]     rsc         Clone resource whose header is being printed
+ * \param[in]     clone_data  Clone variant data holding the cloned child XML
+ */
+static void
+clone_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, clone_variant_data_t *clone_data)
+{
+    char *attrs = NULL;
+    size_t len = 0;
+
+    // Each flag contributes one word; pcmk__add_separated_word handles commas
+    if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+        pcmk__add_separated_word(&attrs, &len, "promotable", ", ");
+    }
+
+    if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+        pcmk__add_separated_word(&attrs, &len, "unique", ", ");
+    }
+
+    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+        pcmk__add_separated_word(&attrs, &len, "unmanaged", ", ");
+    }
+
+    if (pe__resource_is_disabled(rsc)) {
+        pcmk__add_separated_word(&attrs, &len, "disabled", ", ");
+    }
+
+    // attrs stays NULL when no attribute applied, selecting the short form
+    if (attrs) {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)",
+                                 rsc->id, ID(clone_data->xml_obj_child),
+                                 attrs);
+        free(attrs);
+    } else {
+        PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]",
+                                 rsc->id, ID(clone_data->xml_obj_child))
+    }
+}
+
/*!
 * \internal
 * \brief Force a clone to be anonymous when its standard requires it
 *
 * Some resource standards cannot be cloned uniquely; when \p rsc is a clone,
 * warn that its unique setting is being ignored and cap it at one instance
 * per node (and no more instances than there are nodes).
 *
 * \param[in]     standard  Resource standard that disallows unique clones
 * \param[in,out] rsc       Resource to force anonymous (if it is a clone)
 * \param[in]     rid       ID of the resource being cloned
 * \param[in]     data_set  Cluster working set containing \p rsc
 */
void
pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
               pe_working_set_t *data_set)
{
    clone_variant_data_t *variant_data = NULL;

    if (!pe_rsc_is_clone(rsc)) {
        return;
    }

    get_clone_variant_data(variant_data, rsc);
    pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
            "such as %s can be used only as anonymous clones",
            rsc->id, standard, rid);

    variant_data->clone_node_max = 1;
    variant_data->clone_max = QB_MIN(variant_data->clone_max,
                                     g_list_length(data_set->nodes));
}
/*!
 * \internal
 * \brief Look up a particular instance of a clone by its instance suffix
 *
 * \param[in] rsc       Clone resource to search
 * \param[in] sub_id    Instance suffix (for example, "0")
 * \param[in] data_set  Cluster working set (unused here, kept for API shape)
 *
 * \return Matching child resource, or NULL if none exists
 */
pe_resource_t *
find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
{
    pe_resource_t *instance = NULL;
    char *instance_id = NULL;
    clone_variant_data_t *variant_data = NULL;

    get_clone_variant_data(variant_data, rsc);

    // Instance IDs take the form "<child-id>:<suffix>"
    instance_id = crm_strdup_printf("%s:%s", ID(variant_data->xml_obj_child),
                                    sub_id);
    instance = pe_find_resource(rsc->children, instance_id);
    free(instance_id);
    return instance;
}
/*!
 * \internal
 * \brief Create one child instance of a clone resource
 *
 * Copies the clone's child XML, stamps it with the next available instance
 * number, and unpacks it as a new child of \p rsc. Instances created beyond
 * clone-max are flagged (recursively) as orphans.
 *
 * \param[in,out] rsc       Clone resource to create an instance of
 * \param[in,out] data_set  Cluster working set containing \p rsc
 *
 * \return Newly created clone instance, or NULL on error
 */
pe_resource_t *
pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    gboolean as_orphan = FALSE;
    char *inc_num = NULL;
    char *inc_max = NULL;
    pe_resource_t *child_rsc = NULL;
    xmlNode *child_copy = NULL;
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    /* Return a type-correct NULL (previously FALSE, an int constant) from
     * this pointer-returning function
     */
    CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);

    if (clone_data->total_clones >= clone_data->clone_max) {
        // If we've already used all available instances, this is an orphan
        as_orphan = TRUE;
    }

    // Allocate instance numbers in numerical order (starting at 0)
    inc_num = pcmk__itoa(clone_data->total_clones);
    inc_max = pcmk__itoa(clone_data->clone_max);

    child_copy = copy_xml(clone_data->xml_obj_child);
    crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);

    if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
        pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
        child_rsc = NULL;
        goto bail;
    }

    CRM_ASSERT(child_rsc);
    clone_data->total_clones += 1;
    pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
    rsc->children = g_list_append(rsc->children, child_rsc);
    if (as_orphan) {
        pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
    }

    // Expose clone-max to the instance (and resource agents' environment)
    add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
    pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);

  bail:
    free(inc_num);
    free(inc_max);

    return child_rsc;
}
/*!
 * \internal
 * \brief Unpack a clone resource definition into runtime variant data
 *
 * Reads the clone's meta-attributes (clone-max, clone-node-max, and the
 * promoted limits when the clone is promotable), validates them, locates the
 * single primitive or group being cloned, then creates one child resource
 * object per possible instance.
 *
 * \param[in,out] rsc       Clone resource to unpack
 * \param[in,out] data_set  Cluster working set containing \p rsc
 *
 * \return TRUE on success, FALSE if the definition could not be unpacked
 */
gboolean
clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
    int lpc = 0;
    xmlNode *a_child = NULL;
    xmlNode *xml_obj = rsc->xml;
    clone_variant_data_t *clone_data = NULL;
    const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
    const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
    const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);

    pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);

    clone_data = calloc(1, sizeof(clone_variant_data_t));
    rsc->variant_opaque = clone_data;

    if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
        const char *promoted_max = NULL;
        const char *promoted_node_max = NULL;

        promoted_max = g_hash_table_lookup(rsc->meta,
                                           XML_RSC_ATTR_PROMOTED_MAX);
        if (promoted_max == NULL) {
            // @COMPAT deprecated since 2.0.0
            promoted_max = g_hash_table_lookup(rsc->meta,
                                               PCMK_XE_PROMOTED_MAX_LEGACY);
        }

        promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                XML_RSC_ATTR_PROMOTED_NODEMAX);
        if (promoted_node_max == NULL) {
            // @COMPAT deprecated since 2.0.0
            promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                    PCMK_XE_PROMOTED_NODE_MAX_LEGACY);
        }

        // Use 1 as default but 0 for minimum and invalid
        if (promoted_max == NULL) {
            clone_data->promoted_max = 1;
        } else {
            pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
        }

        // Use 1 as default but 0 for minimum and invalid
        if (promoted_node_max == NULL) {
            clone_data->promoted_node_max = 1;
        } else {
            pcmk__scan_min_int(promoted_node_max,
                               &(clone_data->promoted_node_max), 0);
        }
    }

    // Implied by calloc()
    /* clone_data->xml_obj_child = NULL; */

    // Use 1 as default but 0 for minimum and invalid
    if (max_clones_node == NULL) {
        clone_data->clone_node_max = 1;
    } else {
        pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
    }

    /* Use number of nodes (but always at least 1, which is handy for crm_verify
     * for a CIB without nodes) as default, but 0 for minimum and invalid
     */
    if (max_clones == NULL) {
        clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
    } else {
        pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
    }

    clone_data->ordered = crm_is_true(ordered);

    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
        && (clone_data->clone_node_max > 1)) {
        /* The attribute being overridden here is clone-node-max; the message
         * previously (and incorrectly) named XML_RSC_ATTR_PROMOTED_MAX
         */
        pcmk__config_err("Ignoring " XML_RSC_ATTR_INCARNATION_NODEMAX
                         " for %s because anonymous clones support only one "
                         "instance per node", rsc->id);
        clone_data->clone_node_max = 1;
    }

    pe_rsc_trace(rsc, "Options for %s", rsc->id);
    pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
    pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
    pe_rsc_trace(rsc, "\tClone is unique: %s",
                 pe__rsc_bool_str(rsc, pe_rsc_unique));
    pe_rsc_trace(rsc, "\tClone is promotable: %s",
                 pe__rsc_bool_str(rsc, pe_rsc_promotable));

    // Clones may contain a single group or primitive
    for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
         a_child = pcmk__xe_next(a_child)) {

        if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
            clone_data->xml_obj_child = a_child;
            break;
        }
    }

    if (clone_data->xml_obj_child == NULL) {
        pcmk__config_err("%s has nothing to clone", rsc->id);
        return FALSE;
    }

    /*
     * Make clones ever so slightly sticky by default
     *
     * This helps ensure clone instances are not shuffled around the cluster
     * for no benefit in situations when pre-allocation is not appropriate
     */
    if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
        add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
    }

    /* This ensures that the globally-unique value always exists for children to
     * inherit when being unpacked, as well as in resource agents' environment.
     */
    add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                   pe__rsc_bool_str(rsc, pe_rsc_unique));

    if (clone_data->clone_max <= 0) {
        /* Create one child instance so that unpack_find_resource() will hook up
         * any orphans up to the parent correctly.
         */
        if (pe__create_clone_child(rsc, data_set) == NULL) {
            return FALSE;
        }

    } else {
        // Create a child instance for each available instance number
        for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
            if (pe__create_clone_child(rsc, data_set) == NULL) {
                return FALSE;
            }
        }
    }

    pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
    return TRUE;
}
gboolean
clone_active(pe_resource_t * rsc, gboolean all)
{
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
return TRUE;
} else if (all && child_active == FALSE) {
return FALSE;
}
}
if (all) {
return TRUE;
} else {
return FALSE;
}
}
static void
short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
{
if(suffix == NULL) {
suffix = "";
}
if (list) {
if (options & pe_print_html) {
status_print("");
}
status_print("%s%s: [ %s ]%s", prefix, type, list, suffix);
if (options & pe_print_html) {
status_print("\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
}
}
static const char *
configured_role_str(pe_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
configured_role(pe_resource_t * rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
return RSC_ROLE_UNKNOWN;
}
static void
clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
const char *target_role = configured_role_str(rsc);
GList *gIter = rsc->children;
status_print("%sid);
status_print("multi_state=\"%s\" ",
pe__rsc_bool_str(rsc, pe_rsc_promotable));
status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
status_print("failure_ignored=\"%s\" ",
pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s\n", pre_text);
free(child_text);
}
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
{
GList *gIter;
bool all = !any;
if (pcmk_is_set(rsc->flags, flag)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
if(is_set_recursive(gIter->data, flag, any)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
}
if(all) {
return TRUE;
}
return FALSE;
}
void
clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *list_text = NULL;
char *child_text = NULL;
char *stopped_list = NULL;
size_t list_text_len = 0;
size_t stopped_list_len = 0;
GList *promoted_list = NULL;
GList *started_list = NULL;
GList *gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
clone_print_xml(rsc, pre_text, options, print_data);
return;
}
get_clone_variant_data(clone_data, rsc);
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (pcmk_is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
&& !pcmk_is_set(options, pe_print_clone_active)) {
pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_UNPROMOTED) {
promoted_list = g_list_append(promoted_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
if (options & pe_print_html) {
status_print("- \n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
}
}
/* Promoted */
promoted_list = g_list_sort(promoted_list, sort_node_uname);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
pcmk__add_word(&list_text, &list_text_len, host->details->uname);
active_instances++;
}
short_print(list_text, child_text, PROMOTED_INSTANCES, NULL, options,
print_data);
g_list_free(promoted_list);
free(list_text);
list_text = NULL;
list_text_len = 0;
/* Started/Unpromoted */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
pcmk__add_word(&list_text, &list_text_len, host->details->uname);
active_instances++;
}
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_UNPROMOTED) {
short_print(list_text, child_text,
UNPROMOTED_INSTANCES " (target-role)", NULL, options,
print_data);
} else {
short_print(list_text, child_text, UNPROMOTED_INSTANCES, NULL,
options, print_data);
}
} else {
short_print(list_text, child_text, "Started", NULL, options, print_data);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
list_text_len = 0;
if (!pcmk_is_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
GList *list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
stopped_list_len = 0;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
pcmk__add_word(&stopped_list, &stopped_list_len,
node->details->uname);
}
}
g_list_free(list);
}
short_print(stopped_list, child_text, state, NULL, options, print_data);
free(stopped_list);
}
if (options & pe_print_html) {
status_print("
\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
GList *gIter = rsc->children;
+ GList *all = NULL;
int rc = pcmk_rc_no_output;
gboolean printed_header = FALSE;
gboolean print_everything = TRUE;
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
+ all = g_list_prepend(all, (gpointer) "*");
+
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (!printed_header) {
printed_header = TRUE;
rc = pe__name_and_nvpairs_xml(out, true, "clone", 8,
"id", rsc->id,
"multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
"unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
"managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
"disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
"failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
"failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
"target_role", configured_role_str(rsc));
CRM_ASSERT(rc == pcmk_rc_ok);
}
out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
- child_rsc, only_node, only_rsc);
+ child_rsc, only_node, all);
}
if (printed_header) {
pcmk__output_xml_pop_parent(out);
}
+ g_list_free(all);
return rc;
}
PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__clone_default(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
char *list_text = NULL;
char *stopped_list = NULL;
size_t list_text_len = 0;
size_t stopped_list_len = 0;
GList *promoted_list = NULL;
GList *started_list = NULL;
GList *gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
get_clone_variant_data(clone_data, rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
(strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
- out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s%s",
- rsc->id, ID(clone_data->xml_obj_child),
- pcmk_is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
- pe__resource_is_disabled(rsc) ? " (disabled)" : "");
- rc = pcmk_rc_ok;
-
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
continue;
}
if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
print_full = TRUE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (pcmk_is_set(show_opts, pcmk_show_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
&& pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
pcmk__add_word(&stopped_list, &stopped_list_len, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_UNPROMOTED) {
promoted_list = g_list_append(promoted_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
GList *all = NULL;
+ clone_header(out, &rc, rsc, clone_data);
+
/* Print every resource that's a child of this clone. */
all = g_list_prepend(all, (gpointer) "*");
out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
child_rsc, only_node, all);
g_list_free(all);
}
}
if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
free(stopped_list);
- out->end_list(out);
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
return pcmk_rc_ok;
}
/* Promoted */
promoted_list = g_list_sort(promoted_list, sort_node_uname);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_casei)) {
continue;
}
pcmk__add_word(&list_text, &list_text_len, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
+ clone_header(out, &rc, rsc, clone_data);
+
out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]", list_text);
g_list_free(promoted_list);
free(list_text);
list_text = NULL;
list_text_len = 0;
}
/* Started/Unpromoted */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname, pcmk__str_casei)) {
continue;
}
pcmk__add_word(&list_text, &list_text_len, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
+ clone_header(out, &rc, rsc, clone_data);
+
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_UNPROMOTED) {
out->list_item(out, NULL,
UNPROMOTED_INSTANCES " (target-role): [ %s ]",
list_text);
} else {
out->list_item(out, NULL, UNPROMOTED_INSTANCES ": [ %s ]",
list_text);
}
} else {
out->list_item(out, NULL, "Started: [ %s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
list_text_len = 0;
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
GList *list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
stopped_list_len = 0;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(only_node, node->details->uname, pcmk__str_casei)) {
pcmk__add_word(&stopped_list, &stopped_list_len,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
+ clone_header(out, &rc, rsc, clone_data);
+
out->list_item(out, NULL, "%s: [ %s ]", state, stopped_list);
free(stopped_list);
stopped_list_len = 0;
}
}
- out->end_list(out);
-
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
void
clone_free(pe_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
free_xml(child_rsc->xml);
child_rsc->xml = NULL;
/* There could be a saved unexpanded xml */
free_xml(child_rsc->orig_xml);
child_rsc->orig_xml = NULL;
child_rsc->fns->free(child_rsc);
}
g_list_free(rsc->children);
if (clone_data) {
CRM_ASSERT(clone_data->demote_notify == NULL);
CRM_ASSERT(clone_data->stop_notify == NULL);
CRM_ASSERT(clone_data->start_notify == NULL);
CRM_ASSERT(clone_data->promote_notify == NULL);
}
common_free(rsc);
}
enum rsc_role_e
clone_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
clone_role = a_role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
return clone_role;
}
/*!
* \internal
* \brief Check whether a clone has an instance for every node
*
* \param[in] rsc Clone to check
* \param[in] data_set Cluster state
*/
bool
pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->clone_max == g_list_length(data_set->nodes)) {
return TRUE;
}
}
return FALSE;
}
gboolean
pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
{
gboolean passes = FALSE;
clone_variant_data_t *clone_data = NULL;
if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) {
passes = TRUE;
} else {
get_clone_variant_data(clone_data, rsc);
passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child), pcmk__str_none);
if (!passes) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
}
}
}
}
return !passes;
}
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index 1ae33342dd..31e73d7eb2 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,353 +1,423 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#define VARIANT_GROUP 1
#include "./variant.h"
+static int
+inactive_resources(pe_resource_t *rsc)
+{
+ int retval = 0;
+
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (!child_rsc->fns->active(child_rsc, TRUE)) {
+ retval++;
+ }
+ }
+
+ return retval;
+}
+
+static void
+group_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, int n_inactive, bool show_inactive)
+{
+ char *attrs = NULL;
+ size_t len = 0;
+
+ if (n_inactive > 0 && !show_inactive) {
+ char *word = crm_strdup_printf("%d member%s inactive", n_inactive, pcmk__plural_s(n_inactive));
+ pcmk__add_separated_word(&attrs, &len, word, ", ");
+ free(word);
+ }
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk__add_separated_word(&attrs, &len, "unmanaged", ", ");
+ }
+
+ if (pe__resource_is_disabled(rsc)) {
+ pcmk__add_separated_word(&attrs, &len, "disabled", ", ");
+ }
+
+ if (attrs) {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)",
+ rsc->id, attrs);
+ free(attrs);
+ } else {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s", rsc->id);
+ }
+}
+
+static bool
+skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
+ GList *only_rsc, unsigned int show_opts)
+{
+ bool star_list = pcmk__list_of_1(only_rsc) &&
+ pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
+ bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE);
+ bool child_active = child->fns->active(child, FALSE);
+ bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs);
+
+ /* If the resource is in only_rsc by name (so, ignoring "*") then allow
+ * it regardless of if it's active or not.
+ */
+ if (!star_list && !child_filtered) {
+ return false;
+
+ } else if (!child_filtered && (child_active || show_inactive)) {
+ return false;
+
+ } else if (parent_passes && (child_active || show_inactive)) {
+ return false;
+
+ }
+
+ return true;
+}
+
gboolean
group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
group_variant_data_t *group_data = NULL;
const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
const char *clone_id = NULL;
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
group_data = calloc(1, sizeof(group_variant_data_t));
group_data->num_children = 0;
group_data->first_child = NULL;
group_data->last_child = NULL;
rsc->variant_opaque = group_data;
// We don't actually need the null checks but it speeds up the common case
if ((group_ordered == NULL)
|| (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
group_data->ordered = TRUE;
}
if ((group_colocated == NULL)
|| (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
group_data->colocated = TRUE;
}
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
if (pcmk__str_eq((const char *)xml_native_rsc->name,
XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
pe_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
continue;
}
group_data->num_children++;
rsc->children = g_list_append(rsc->children, new_rsc);
if (group_data->first_child == NULL) {
group_data->first_child = new_rsc;
}
group_data->last_child = new_rsc;
pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
}
}
if (group_data->num_children == 0) {
pcmk__config_warn("Group %s does not have any children", rsc->id);
return TRUE; // Allow empty groups, children can be added later
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
return TRUE;
}
gboolean
group_active(pe_resource_t * rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
} else {
c_all = FALSE;
}
}
if (c_any == FALSE) {
return FALSE;
} else if (all && c_all == FALSE) {
return FALSE;
}
return TRUE;
}
static void
group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
GList *gIter = rsc->children;
char *child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sid);
status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s\n", pre_text);
free(child_text);
}
void
group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = NULL;
GList *gIter = rsc->children;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
group_print_xml(rsc, pre_text, options, print_data);
return;
}
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
if (options & pe_print_html) {
status_print("\n\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
if (options & pe_print_brief) {
print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
} else {
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("- \n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
}
}
if (options & pe_print_html) {
status_print("
\n");
}
free(child_text);
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
GList *gIter = rsc->children;
char *count = pcmk__itoa(g_list_length(gIter));
int rc = pcmk_rc_no_output;
- gboolean print_everything = TRUE;
+
+ gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
free(count);
return rc;
}
- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
-
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
}
if (rc == pcmk_rc_no_output) {
rc = pe__name_and_nvpairs_xml(out, true, "group", 4
, "id", rsc->id
, "number_resources", count
, "managed", pe__rsc_bool_str(rsc, pe_rsc_managed)
, "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)));
free(count);
CRM_ASSERT(rc == pcmk_rc_ok);
}
out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc,
only_node, only_rsc);
}
if (rc == pcmk_rc_ok) {
pcmk__output_xml_pop_parent(out);
}
return rc;
}
PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__group_default(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
int rc = pcmk_rc_no_output;
- gboolean print_everything = TRUE;
+
+ gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
+
+ gboolean active = rsc->fns->active(rsc, TRUE);
+ gboolean partially_active = rsc->fns->active(rsc, FALSE);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
- print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) ||
- (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none));
-
if (pcmk_is_set(show_opts, pcmk_show_brief)) {
GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
if (rscs != NULL) {
- out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id,
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
- pe__resource_is_disabled(rsc) ? " (disabled)" : "");
-
+ group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+ pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs);
rc = pcmk_rc_ok;
g_list_free(rscs);
}
} else {
for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
}
- PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id,
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)",
- pe__resource_is_disabled(rsc) ? " (disabled)" : "");
-
+ group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+ pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
child_rsc, only_node, only_rsc);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
void
group_free(pe_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
child_rsc->fns->free(child_rsc);
}
pe_rsc_trace(rsc, "Freeing child list");
g_list_free(rsc->children);
common_free(rsc);
}
enum rsc_role_e
group_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
group_role = role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
return group_role;
}
gboolean
pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
{
gboolean passes = FALSE;
if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)), pcmk__str_none)) {
passes = TRUE;
} else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) {
passes = TRUE;
} else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) {
passes = TRUE;
} else {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
}
}
}
return !passes;
}
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 56054fc4a7..786d1af91f 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1355 +1,1355 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#define VARIANT_NATIVE 1
#include "./variant.h"
#ifdef PCMK__COMPAT_2_0
#define PROVIDER_SEP "::"
#else
#define PROVIDER_SEP ":"
#endif
/*!
* \internal
* \brief Check whether a resource is active on multiple nodes
*/
static bool
is_multiply_active(pe_resource_t *rsc)
{
unsigned int count = 0;
if (rsc->variant == pe_native) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
{
int priority = 0;
if ((rsc->priority == 0) || (failed == TRUE)) {
return;
}
if (rsc->role == RSC_ROLE_PROMOTED) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
} else {
priority = rsc->priority;
}
node->details->priority += priority;
pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s)",
node->details->uname, node->details->priority,
(rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
if (node->details->remote_rsc
&& node->details->remote_rsc->container) {
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s) "
"from guest node '%s'",
a_node->details->uname, a_node->details->priority,
(rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
node->details->uname);
}
}
}
void
native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
{
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = (pe_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
return;
}
}
pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname,
pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
if (rsc->variant == pe_native) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
}
if (rsc->variant == pe_native && node->details->maintenance) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_resource_t *p = rsc->parent;
pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
while(p && node->details->online) {
/* add without the additional location constraint */
p->running_on = g_list_append(p->running_on, node);
p = p->parent;
}
return;
}
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
case recovery_stop_only:
{
GHashTableIter gIter;
pe_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -INFINITY;
}
}
break;
case recovery_stop_start:
break;
case recovery_block:
pe__clear_resource_flags(rsc, pe_rsc_managed);
pe__set_resource_flags(rsc, pe_rsc_block);
/* If the resource belongs to a group or bundle configured with
* multiple-active=block, block the entire entity.
*/
if (rsc->parent
&& (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
&& rsc->parent->recovery_type == recovery_block) {
GList *gIter = rsc->parent->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe__clear_resource_flags(child, pe_rsc_managed);
pe__set_resource_flags(child, pe_rsc_block);
}
}
break;
}
crm_debug("%s is active on multiple nodes including %s: %s",
rsc->id, node->details->uname,
recovery2text(rsc->recovery_type));
} else {
pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname);
}
if (rsc->parent != NULL) {
native_add_running(rsc->parent, node, data_set, FALSE);
}
}
static void
recursive_clear_unique(pe_resource_t *rsc)
{
pe__clear_resource_flags(rsc, pe_rsc_unique);
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
for (GList *child = rsc->children; child != NULL; child = child->next) {
recursive_clear_unique((pe_resource_t *) child->data);
}
}
gboolean
native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_resource_t *parent = uber_parent(rsc);
native_variant_data_t *native_data = NULL;
const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
native_data = calloc(1, sizeof(native_variant_data_t));
rsc->variant_opaque = native_data;
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
&& pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
pe__force_anon(standard, parent, rsc->id, data_set);
/* Clear globally-unique on the parent and all its descendents unpacked
* so far (clearing the parent should make any future children unpacking
* correct). We have to clear this resource explicitly because it isn't
* hooked into the parent's children yet.
*/
recursive_clear_unique(parent);
recursive_clear_unique(rsc);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
&& pcmk_is_set(parent->flags, pe_rsc_promotable)) {
pe_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
rsc->id, standard);
return FALSE;
}
return TRUE;
}
static bool
rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
{
pe_rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, node->details->uname);
if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
pe_node_t *loc = (pe_node_t *) iter->data;
if (loc->details == node->details) {
return TRUE;
}
}
} else if (pcmk_is_set(flags, pe_find_inactive)
&& (rsc->running_on == NULL)) {
return TRUE;
} else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
&& (rsc->allocated_to->details == node->details)) {
return TRUE;
}
return FALSE;
}
pe_resource_t *
native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
int flags)
{
bool match = FALSE;
pe_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
if (flags & pe_find_clone) {
const char *rid = ID(rsc->xml);
if (!pe_rsc_is_clone(uber_parent(rsc))) {
match = FALSE;
} else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_casei)) {
match = TRUE;
}
} else if (!strcmp(id, rsc->id)) {
match = TRUE;
} else if (pcmk_is_set(flags, pe_find_renamed)
&& rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
} else if (pcmk_is_set(flags, pe_find_any)
|| (pcmk_is_set(flags, pe_find_anon)
&& !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
if (match && on_node) {
bool match_node = rsc_is_on_node(rsc, on_node, flags);
if (match_node == FALSE) {
match = FALSE;
}
}
if (match) {
return rsc;
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = rsc->fns->find_rsc(child, id, on_node, flags);
if (result) {
return result;
}
}
return NULL;
}
// create is ignored
char *
native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set)
{
char *value_copy = NULL;
const char *value = NULL;
GHashTable *params = NULL;
CRM_CHECK(rsc != NULL, return NULL);
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
params = pe_rsc_params(rsc, node, data_set);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
value = g_hash_table_lookup(rsc->meta, name);
}
if (value != NULL) {
value_copy = strdup(value);
}
return value_copy;
}
gboolean
native_active(pe_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = (pe_node_t *) gIter->data;
if (a_node->details->unclean) {
pe_rsc_trace(rsc, "Resource %s: node %s is unclean",
rsc->id, a_node->details->uname);
return TRUE;
} else if (a_node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s: node %s is offline",
rsc->id, a_node->details->uname);
} else {
pe_rsc_trace(rsc, "Resource %s active on %s",
rsc->id, a_node->details->uname);
return TRUE;
}
}
return FALSE;
}
struct print_data_s {
long options;
void *print_data;
};
static const char *
native_pending_state(pe_resource_t * rsc)
{
const char *pending_state = NULL;
if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
} else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
pending_state = "Stopping";
} else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
/* Work might be done in here. */
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
pending_state = "Promoting";
} else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
pending_state = "Demoting";
}
return pending_state;
}
static const char *
native_pending_task(pe_resource_t * rsc)
{
const char *pending_task = NULL;
if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, uncomment this and the corresponding part of
* unpack.c:unpack_rsc_op().
*/
/*
} else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
pending_task = "Checking";
*/
}
return pending_task;
}
static enum rsc_role_e
native_displayable_role(pe_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
if ((role == RSC_ROLE_STARTED)
&& pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
role = RSC_ROLE_UNPROMOTED;
}
return role;
}
static const char *
native_displayable_state(pe_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
if (print_pending) {
rsc_state = native_pending_state(rsc);
}
if (rsc_state == NULL) {
rsc_state = role2text(native_displayable_role(rsc));
}
return rsc_state;
}
static void
native_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending));
const char *target_role = NULL;
/* resource information. */
status_print("%sxml, XML_ATTR_TYPE));
status_print("role=\"%s\" ", rsc_state);
if (rsc->meta) {
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
status_print("failure_ignored=\"%s\" ",
pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
if (options & pe_print_pending) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
status_print("pending=\"%s\" ", pending_task);
}
}
/* print out the nodes this resource is running on */
if (options & pe_print_rsconly) {
status_print("/>\n");
/* do nothing */
} else if (rsc->running_on != NULL) {
GList *gIter = rsc->running_on;
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
status_print("%s \n", pre_text,
node->details->uname, node->details->id,
pcmk__btoa(node->details->online == FALSE));
}
status_print("%s\n", pre_text);
} else {
status_print("/>\n");
}
}
// Append a flag to resource description string's flags list
static bool
add_output_flag(GString *s, const char *flag_desc, bool have_flags)
{
g_string_append(s, (have_flags? ", " : " ("));
g_string_append(s, flag_desc);
return true;
}
// Append a node name to resource description string's node list
static bool
add_output_node(GString *s, const char *node, bool have_nodes)
{
g_string_append(s, (have_nodes? " " : " [ "));
g_string_append(s, node);
return true;
}
/*!
* \internal
* \brief Create a string description of a resource
*
* \param[in] rsc Resource to describe
* \param[in] name Desired identifier for the resource
* \param[in] node If not NULL, node that resource is "on"
* \param[in] show_opts Bitmask of pcmk_show_opt_e.
* \param[in] target_role Resource's target role
* \param[in] show_nodes Whether to display nodes when multiply active
*
* \return Newly allocated string description of resource
* \note Caller must free the result with g_free().
*/
gchar *
pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
unsigned long show_opts, const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *provider = NULL;
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
gchar *retval = NULL;
GString *outstr = NULL;
bool have_flags = false;
if (rsc->variant != pe_native) {
return NULL;
}
CRM_CHECK(name != NULL, name = "unknown");
CRM_CHECK(kind != NULL, kind = "unknown");
CRM_CHECK(class != NULL, class = "unknown");
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
}
if ((node == NULL) && (rsc->lock_node != NULL)) {
node = rsc->lock_node;
}
if (pcmk_is_set(show_opts, pcmk_show_rsc_only)
|| pcmk__list_of_multiple(rsc->running_on)) {
node = NULL;
}
// We need a string of at least this size
outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind)
+ (provider? (strlen(provider) + 2) : 0)
+ (node? strlen(node->details->uname) + 1 : 0)
+ 11);
// Resource name and agent
g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class,
((provider == NULL)? "" : PROVIDER_SEP),
((provider == NULL)? "" : provider), kind);
// State on node
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
g_string_append(outstr, " ORPHANED");
}
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
if (role > RSC_ROLE_UNPROMOTED) {
g_string_append_printf(outstr, " FAILED %s", role2text(role));
} else {
g_string_append(outstr, " FAILED");
}
} else {
g_string_append_printf(outstr, " %s", native_displayable_state(rsc, pcmk_is_set(show_opts, pcmk_show_pending)));
}
if (node) {
g_string_append_printf(outstr, " %s", node->details->uname);
}
// Flags, as: ( [...])
if (node && !(node->details->online) && node->details->unclean) {
have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
}
if (node && (node == rsc->lock_node)) {
have_flags = add_output_flag(outstr, "LOCKED", have_flags);
}
if (pcmk_is_set(show_opts, pcmk_show_pending)) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
have_flags = add_output_flag(outstr, pending_task, have_flags);
}
}
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
/* Only show target role if it limits our abilities (i.e. ignore
* Started, as it is the default anyways, and doesn't prevent the
* resource from becoming promoted).
*/
if (target_role_e == RSC_ROLE_STOPPED) {
have_flags = add_output_flag(outstr, "disabled", have_flags);
} else if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
&& target_role_e == RSC_ROLE_UNPROMOTED) {
have_flags = add_output_flag(outstr, "target-role:", have_flags);
g_string_append(outstr, target_role);
}
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
if (have_flags) {
g_string_append(outstr, ")");
}
// User-supplied description
if (pcmk_is_set(show_opts, pcmk_show_rsc_only)
|| pcmk__list_of_multiple(rsc->running_on)) {
const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
if (desc) {
g_string_append_printf(outstr, " %s", desc);
}
}
if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
&& pcmk__list_of_multiple(rsc->running_on)) {
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *n = (pe_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
if (have_nodes) {
g_string_append(outstr, " ]");
}
}
retval = outstr->str;
g_string_free(outstr, FALSE);
return retval;
}
int
pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc,
const char *name, pe_node_t *node, unsigned int show_opts)
{
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
const char *target_role = NULL;
xmlNodePtr list_node = NULL;
const char *cl = NULL;
CRM_ASSERT(rsc->variant == pe_native);
CRM_ASSERT(kind != NULL);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
cl = "rsc-managed";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
cl = "rsc-failed";
} else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
cl = "rsc-failed";
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = "rsc-multiple";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
cl = "rsc-failure-ignored";
} else {
cl = "rsc-ok";
}
{
gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
pcmk_create_html_node(list_node, "span", NULL, cl, s);
g_free(s);
}
return pcmk_rc_ok;
}
int
pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc,
const char *name, pe_node_t *node, unsigned int show_opts)
{
const char *target_role = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
{
gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
out->list_item(out, NULL, "%s", s);
g_free(s);
}
return pcmk_rc_ok;
}
void
common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data)
{
const char *target_role = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(options, pe_print_implicit)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
}
if ((pre_text == NULL) && (options & pe_print_printf)) {
pre_text = " ";
}
if (options & pe_print_html) {
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
status_print("");
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
status_print("");
} else if (rsc->running_on == NULL) {
status_print("");
} else if (pcmk__list_of_multiple(rsc->running_on)) {
status_print("");
} else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
status_print("");
} else {
status_print("");
}
}
{
gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
target_role, false);
status_print("%s%s", (pre_text? pre_text : ""), resource_s);
g_free(resource_s);
}
if (pcmk_is_set(options, pe_print_html)) {
status_print(" ");
}
if (!pcmk_is_set(options, pe_print_rsconly)
&& pcmk__list_of_multiple(rsc->running_on)) {
GList *gIter = rsc->running_on;
int counter = 0;
if (options & pe_print_html) {
status_print("\n");
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print("[");
}
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = (pe_node_t *) gIter->data;
counter++;
if (options & pe_print_html) {
status_print("- \n%s", n->details->uname);
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print(" %s", n->details->uname);
} else if ((options & pe_print_log)) {
status_print("\t%d : %s", counter, n->details->uname);
} else {
status_print("%s", n->details->uname);
}
if (options & pe_print_html) {
status_print("
\n");
}
}
if (options & pe_print_html) {
status_print("
\n");
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print(" ]");
}
}
if (options & pe_print_html) {
status_print("
\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
}
void
native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
pe_node_t *node = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
}
node = pe__current_node(rsc);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
}
PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
const char *rsc_state = native_displayable_state(rsc, print_pending);
char ra_name[LINE_MAX];
char *nodes_running_on = NULL;
char *priority = NULL;
int rc = pcmk_rc_no_output;
const char *target_role = NULL;
if (rsc->meta != NULL) {
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
/* resource information. */
snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
crm_element_value(rsc->xml, XML_ATTR_TYPE));
nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
priority = pcmk__ftoa(rsc->priority);
rc = pe__name_and_nvpairs_xml(out, true, "resource", 12,
"id", rsc_printable_id(rsc),
"resource_agent", ra_name,
"role", rsc_state,
"target_role", target_role,
"active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
"orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
"blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
"managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
"failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
"failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
"nodes_running_on", nodes_running_on,
"pending", (print_pending? native_pending_task(rsc) : NULL));
free(priority);
free(nodes_running_on);
CRM_ASSERT(rc == pcmk_rc_ok);
if (rsc->running_on != NULL) {
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
"name", node->details->uname,
"id", node->details->id,
"cached", pcmk__btoa(node->details->online));
CRM_ASSERT(rc == pcmk_rc_ok);
}
}
pcmk__output_xml_pop_parent(out);
return rc;
}
PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
pe_node_t *node = pe__current_node(rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
CRM_ASSERT(rsc->variant == pe_native);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
unsigned int show_opts = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
pe_node_t *node = pe__current_node(rsc);
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
void
native_free(pe_resource_t * rsc)
{
pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
native_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
if (current) {
role = rsc->role;
}
pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
return role;
}
/*!
* \internal
* \brief List nodes where a resource (or any of its children) is
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current 0 = where allocated, 1 = where running,
* 2 = where running or pending
*
* \return If list contains only one node, that node, or NULL otherwise
*/
pe_node_t *
native_location(const pe_resource_t *rsc, GList **list, int current)
{
pe_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
child->fns->location(child, &result, current);
}
} else if (current) {
if (rsc->running_on) {
result = g_list_copy(rsc->running_on);
}
if ((current == 2) && rsc->pending_node
&& !pe_find_node_id(result, rsc->pending_node->details->id)) {
result = g_list_append(result, rsc->pending_node);
}
} else if (current == FALSE && rsc->allocated_to) {
result = g_list_append(NULL, rsc->allocated_to);
}
if (result && (result->next == NULL)) {
one = result->data;
}
if (list) {
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
}
}
}
g_list_free(result);
return one;
}
static void
get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
int offset = 0;
char buffer[LINE_MAX];
int *rsc_counter = NULL;
int *active_counter = NULL;
if (rsc->variant != pe_native) {
continue;
}
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
if (prov != NULL) {
offset += snprintf(buffer + offset, LINE_MAX - offset,
PROVIDER_SEP "%s", prov);
}
}
offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
CRM_LOG_ASSERT(offset > 0);
if (rsc_table) {
rsc_counter = g_hash_table_lookup(rsc_table, buffer);
if (rsc_counter == NULL) {
rsc_counter = calloc(1, sizeof(int));
*rsc_counter = 0;
g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
}
(*rsc_counter)++;
}
if (active_table) {
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_node_t *node = (pe_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE) {
continue;
}
node_table = g_hash_table_lookup(active_table, node->details->uname);
if (node_table == NULL) {
node_table = pcmk__strkey_table(free, free);
g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
}
active_counter = g_hash_table_lookup(node_table, buffer);
if (active_counter == NULL) {
active_counter = calloc(1, sizeof(int));
*active_counter = 0;
g_hash_table_insert(node_table, strdup(buffer), active_counter);
}
(*active_counter)++;
}
}
}
}
static void
destroy_node_table(gpointer data)
{
GHashTable *node_table = data;
if (node_table) {
g_hash_table_destroy(node_table);
}
}
/*!
 * \brief Print a brief summary of resources, one line per distinct agent
 *        specification (the class[:provider]:type strings built by
 *        get_rscs_brief())
 *
 * \param[in] rsc_list    Resources to summarize
 * \param[in] pre_text    Prefix for each output line (NULL treated as "")
 * \param[in] options     Bitmask of pe_print_* flags
 * \param[in] print_data  Output destination (presumably consumed by the
 *                        status_print() macro together with \p options —
 *                        TODO confirm against the macro's definition; if so,
 *                        the local names options/print_data must not change)
 * \param[in] print_all   If TRUE, print "active/configured" counts and also
 *                        emit a line for types with no active instances
 */
void
print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
                 void *print_data, gboolean print_all)
{
    // Maps agent type string -> number of configured instances
    GHashTable *rsc_table = pcmk__strkey_table(free, free);

    // Maps node name -> (agent type string -> number active on that node)
    GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
    GHashTableIter hash_iter;
    char *type = NULL;
    int *rsc_counter = NULL;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    g_hash_table_iter_init(&hash_iter, rsc_table);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;  // total active instances of this type

        // One line per node that has active instances of this type
        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;
            } else {
                active_counter_all += *active_counter;
            }

            if (options & pe_print_rsconly) {
                // Suppress node names in resource-only mode
                node_name = NULL;
            }

            /* NOTE(review): for HTML output this prints only a newline; this
             * looks like stripped list markup (e.g. an opening tag) — verify
             * against the upstream source.
             */
            if (options & pe_print_html) {
                status_print("\n");
            }

            if (print_all) {
                // "active/configured (type): Active node"
                status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0,
                             rsc_counter ? *rsc_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            } else {
                // "active (type): Active node"
                status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            }

            // NOTE(review): possibly a stripped closing tag — see note above
            if (options & pe_print_html) {
                status_print("\n");
            }
        }

        // With print_all, also report agent types that are entirely inactive
        if (print_all && active_counter_all == 0) {
            if (options & pe_print_html) {
                status_print("\n");
            }

            status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                         active_counter_all,
                         rsc_counter ? *rsc_counter : 0, type);

            if (options & pe_print_html) {
                status_print("\n");
            }
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }
    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }
}
/*!
 * \internal
 * \brief Output a brief summary of resources, one line per distinct agent
 *        specification (the class[:provider]:type strings built by
 *        get_rscs_brief())
 *
 * \param[in,out] out        Output object
 * \param[in]     rsc_list   Resources to summarize
 * \param[in]     show_opts  Bitmask of pcmk_show_* options
 *
 * \return pcmk_rc_ok if anything was output, otherwise pcmk_rc_no_output
 */
int
pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int show_opts)
{
    // Maps agent type string -> number of configured instances
    GHashTable *rsc_table = pcmk__strkey_table(free, free);

    // Maps node name -> (agent type string -> number active on that node)
    GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
    GList *sorted_rscs = NULL;
    int rc = pcmk_rc_no_output;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    /* Sort the rsc_table keys so that output order stays consistent between
     * systems. The returned list borrows the table's keys (it does not own
     * them), so it must be released before rsc_table is destroyed.
     */
    sorted_rscs = g_hash_table_get_keys(rsc_table);
    sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);

    for (GList *gIter = sorted_rscs; gIter != NULL; gIter = gIter->next) {
        char *type = (char *) gIter->data;
        int *rsc_counter = g_hash_table_lookup(rsc_table, type);

        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;  // total active instances of this type

        // One line per node that has active instances of this type
        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *) &node_name,
                                      (gpointer *) &node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if ((active_counter == NULL) || (*active_counter == 0)) {
                continue;
            }
            active_counter_all += *active_counter;

            if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
                // Suppress node names in resource-only mode
                node_name = NULL;
            }

            if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                // "active/configured (type): Active node"
                out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                               *active_counter,
                               rsc_counter ? *rsc_counter : 0, type,
                               (*active_counter > 0) && node_name ? node_name : "");
            } else {
                // "active (type): Active node"
                out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                               *active_counter, type,
                               (*active_counter > 0) && node_name ? node_name : "");
            }
            rc = pcmk_rc_ok;
        }

        // When showing inactive resources, also report entirely inactive types
        if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)
            && (active_counter_all == 0)) {
            out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                           active_counter_all,
                           rsc_counter ? *rsc_counter : 0, type);
            rc = pcmk_rc_ok;
        }
    }

    /* Free the borrowed key list before destroying the table that owns the
     * keys (the previous order left dangling pointers in the list), and
     * destroy the tables unconditionally: both are always created above, and
     * g_list_free(NULL) is a no-op so no guards are needed.
     */
    g_list_free(sorted_rscs);
    g_hash_table_destroy(rsc_table);
    g_hash_table_destroy(active_table);

    return rc;
}
/*!
 * \internal
 * \brief Check whether a primitive resource should be filtered from output
 *
 * \param[in] rsc           Resource to check
 * \param[in] only_rsc      List of resource names/IDs that should be shown
 * \param[in] check_parent  If TRUE, also accept a match on an ancestor
 *
 * \return TRUE if the resource should be filtered out, FALSE if it matches
 *         \p only_rsc (directly or, with \p check_parent, via an ancestor)
 */
gboolean
pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
{
    if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)
        || pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) {
        return FALSE;

    /* Leftover unified-diff markers resolved here: keep the '+' side, which
     * only delegates to an ancestor when the resource actually has a parent.
     */
    } else if (check_parent && (rsc->parent != NULL)) {
        pe_resource_t *up = uber_parent(rsc);

        if (pe_rsc_is_bundled(rsc)) {
            // Bundled: the filterable ancestor is the bundle above the clone
            return up->parent->fns->is_filtered(up->parent, only_rsc, FALSE);
        } else {
            return up->fns->is_filtered(up, only_rsc, FALSE);
        }
    }
    return TRUE;
}