diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index 27a52b7196..070a3205a3 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -1,3966 +1,3966 @@ =#=#=#= Begin test: Basic text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output - OK (0) =#=#=#= * Passed: crm_mon - Basic text output =#=#=#= Begin test: XML output =#=#=#= =#=#=#= End test: XML output - OK (0) =#=#=#= * Passed: crm_mon - XML output =#=#=#= Begin test: Basic text output without node section =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#= * Passed: crm_mon - Basic text output without node section =#=#=#= Begin test: XML output without the node section =#=#=#= =#=#=#= End test: XML output without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= * Passed: crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 
httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster02 * ping (ocf:pacemaker:ping): Started cluster01 * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * 
httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01 * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster01 * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01 * Replica[1] * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster02 * httpd (ocf:heartbeat:apache): Started httpd-bundle-1 * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02 * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster02 * Replica[2] * httpd-bundle-ip-192.168.122.133 (ocf:heartbeat:IPaddr2): Stopped * httpd (ocf:heartbeat:apache): Stopped * httpd-bundle-docker-2 (ocf:heartbeat:docker): Stopped * httpd-bundle-2 (ocf:pacemaker:remote): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Clone Set: promotable-clone [promotable-rsc] (promotable): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 * promotable-rsc (ocf:pacemaker:Stateful): Stopped * promotable-rsc (ocf:pacemaker:Stateful): Stopped * promotable-rsc (ocf:pacemaker:Stateful): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief 
text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster02 * 1 (stonith:fence_xvm): Active cluster01 * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * 1/1 (lsb:exim): Active cluster02 * 1/1 (ocf:heartbeat:IPaddr): Active cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * ping (ocf:pacemaker:ping): Started * Fencing (stonith:fence_xvm): Started * mysql-proxy (lsb:mysql-proxy): 
Started * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started * Node cluster02: online: * Resources: * ping (ocf:pacemaker:ping): Started * dummy (ocf:pacemaker:Dummy): Started * Public-IP (ocf:heartbeat:IPaddr): Started * Email (lsb:exim): Started * mysql-proxy (lsb:mysql-proxy): Started * promotable-rsc (ocf:pacemaker:Stateful): Promoted * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started * GuestNode httpd-bundle-0@cluster01: online: * Resources: * httpd (ocf:heartbeat:apache): Started * GuestNode httpd-bundle-1@cluster02: online: * Resources: * httpd (ocf:heartbeat:apache): Started * GuestNode httpd-bundle-2@: OFFLINE: * Resources: Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (lsb:mysql-proxy): Active * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:Stateful): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: * Resources: * 1 (lsb:exim): Active * 1 (lsb:mysql-proxy): 
Active * 1 (ocf:heartbeat:IPaddr): Active * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:Dummy): Active * 1 (ocf:pacemaker:Stateful): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * GuestNode httpd-bundle-0@cluster01: online: * Resources: * 1 (ocf:heartbeat:apache): Active * GuestNode httpd-bundle-1@cluster02: online: * Resources: * 1 (ocf:heartbeat:apache): Active Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - XML output grouped by node =#=#=#= Begin test: Complete text output filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 Operations: * 
Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node =#=#=#= Begin test: Complete text output filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] Node Attributes: * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by tag =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured 
(4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by resource tag =#=#=#= Begin test: XML output filtered by resource tag =#=#=#= =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource tag =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by node that doesn't exist =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#= =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ 
cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by node =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by primitive resource =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#= =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by primitive resource =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource =#=#=#= Begin test: XML output filtered by group resource =#=#=#= =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * 
Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource member =#=#=#= Begin test: XML output filtered by group resource member =#=#=#= =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource member =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource =#=#=#= Begin test: XML output filtered by clone resource =#=#=#= =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource instance =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#= =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource instance =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster02 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: migration-threshold=1000000: * (11) start * (12) 
monitor: interval="10000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by exact clone resource instance =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#= =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by exact clone resource instance =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by resource that doesn't exist =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#= =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by tag =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#= =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by inactive bundle resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 
(version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#= =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled IP address resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[1] * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container =#=#=#= Begin test: XML output filtered by bundled container =#=#=#= =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled container =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#= =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundle connection =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * Replica[1] * httpd (ocf:heartbeat:apache): Started httpd-bundle-1 * Replica[2] * httpd (ocf:heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by 
bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#= =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled primitive resource =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by clone name in cloned group =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#= =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by clone name in cloned group =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by group name in cloned group =#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#= =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by group name in cloned group =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * 
GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#= =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact group instance name in cloned group =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by primitive name in cloned group =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#= =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by primitive name in cloned group =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] * GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete 
text output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#= =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: Text output of partially active resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01 - * Resource Group: partially-active-group: + * Resource Group: partially-active-group (1 member inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources =#=#=#= Begin test: XML output of partially active resources =#=#=#= =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - XML output of partially active resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Full List of Resources: * 1/1 (stonith:fence_xvm): Active cluster01 * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * 1/2 (ocf:pacemaker:Dummy): Active cluster02 Node Attributes: * Node: cluster01: * pingd : 1000 * 
Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output, with inactive resources =#=#=#= Begin test: Text output of partially active group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Active Resources: - * Resource Group: partially-active-group: + * Resource Group: partially-active-group (1 member inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 =#=#=#= End test: Text output of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active group =#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Full List of Resources: * Resource Group: partially-active-group: * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active group, with inactive resources =#=#=#= Begin test: Text output of active member of partially active group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Active Resources: - * Resource Group: partially-active-group: + * Resource Group: partially-active-group (1 member inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 =#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of active member of partially active group =#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 
resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Active Resources: - * Resource Group: partially-active-group: + * Resource Group: partially-active-group (1 member inactive): * dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of inactive member of partially active group =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: * Resources: * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:Dummy): Active * 1 (ocf:pacemaker:remote): Active * GuestNode httpd-bundle-0@cluster02: online: * Resources: * 1 (ocf:heartbeat:apache): Active Inactive Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * 1/2 (ocf:pacemaker:Dummy): Active cluster02 Node Attributes: * Node: cluster01: * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node, with inactive resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Stopped cluster01 =#=#=#= End test: Text output of 
partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#= =#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, filtered by node =#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * GuestNode httpd-bundle-0@cluster01: maintenance * GuestNode httpd-bundle-1@cluster02: maintenance * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping] (unmanaged): * ping (ocf:pacemaker:ping): Started cluster02 (unmanaged) * ping (ocf:pacemaker:ping): Started cluster01 (unmanaged) * Fencing (stonith:fence_xvm): Started cluster01 (unmanaged) * dummy (ocf:pacemaker:Dummy): Started cluster02 (unmanaged) * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (unmanaged) (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged) * Container bundle set: httpd-bundle [pcmk:http] (unmanaged): * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (unmanaged) * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (unmanaged) * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (unmanaged) * Resource Group: exim-group (unmanaged): * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (unmanaged) * Email (lsb:exim): Started cluster02 (unmanaged) * Clone Set: mysql-clone-group [mysql-group] (unmanaged): * Resource Group: mysql-group:0 (unmanaged): * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged) * Resource Group: mysql-group:1 (unmanaged): * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged) * Clone Set: promotable-clone [promotable-rsc] (promotable) (unmanaged): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (unmanaged) * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (unmanaged) =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - Text output of all resources with maintenance-mode enabled diff --git a/lib/pengine/group.c b/lib/pengine/group.c index 00681597c9..ae94fc7268 100644 --- a/lib/pengine/group.c +++ b/lib/pengine/group.c @@ -1,379 +1,428 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include +#include #include #include #define VARIANT_GROUP 1 #include "./variant.h" +static int +inactive_resources(pe_resource_t *rsc) +{ + int retval = 0; + + for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { + pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; + + if (!child_rsc->fns->active(child_rsc, TRUE)) { + retval++; + } + } + + return retval; +} + static bool skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes, GList *only_rsc, unsigned int show_opts) { bool star_list = pcmk__list_of_1(only_rsc) && pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none); bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE); bool child_active = child->fns->active(child, FALSE); bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs); /* If the resource is in only_rsc by name (so, ignoring "*") then allow * it regardless of if it's active or not. */ if (!star_list && !child_filtered) { return false; } else if (!child_filtered && (child_active || show_inactive)) { return false; } else if (parent_passes && (child_active || show_inactive)) { return false; } return true; } gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set) { xmlNode *xml_obj = rsc->xml; xmlNode *xml_native_rsc = NULL; group_variant_data_t *group_data = NULL; const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED); const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated"); const char *clone_id = NULL; pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); group_data = calloc(1, sizeof(group_variant_data_t)); group_data->num_children = 0; group_data->first_child = NULL; group_data->last_child = NULL; rsc->variant_opaque = group_data; // We don't actually need the null checks but it speeds up the common case if ((group_ordered == NULL) || (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) { group_data->ordered = TRUE; } if ((group_colocated == NULL) || (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) { group_data->colocated = TRUE; } clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION); for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL; xml_native_rsc = pcmk__xe_next(xml_native_rsc)) { if (pcmk__str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) { pe_resource_t *new_rsc = NULL; crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id); if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } continue; } group_data->num_children++; rsc->children = g_list_append(rsc->children, new_rsc); if (group_data->first_child == NULL) { group_data->first_child = new_rsc; } group_data->last_child = new_rsc; pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id); } } if (group_data->num_children == 0) { pcmk__config_warn("Group %s does not have any children", rsc->id); return TRUE; // Allow empty groups, children can be added later } pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id); return TRUE; } gboolean group_active(pe_resource_t * rsc, gboolean all) { gboolean c_all = TRUE; gboolean c_any = FALSE; GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) 
gIter->data; if (child_rsc->fns->active(child_rsc, all)) { c_any = TRUE; } else { c_all = FALSE; } } if (c_any == FALSE) { return FALSE; } else if (all && c_all == FALSE) { return FALSE; } return TRUE; } static void group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data) { GList *gIter = rsc->children; char *child_text = crm_strdup_printf("%s ", pre_text); status_print("%s<group id=\"%s\" ", pre_text, rsc->id); status_print("number_resources=\"%d\" ", g_list_length(rsc->children)); status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->fns->print(child_rsc, child_text, options, print_data); } status_print("%s</group>\n", pre_text); free(child_text); } void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data) { char *child_text = NULL; GList *gIter = rsc->children; if (pre_text == NULL) { pre_text = " "; } if (options & pe_print_xml) { group_print_xml(rsc, pre_text, options, print_data); return; } child_text = crm_strdup_printf("%s ", pre_text); status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id); if (options & pe_print_html) { status_print("\n<ul>\n"); } else if ((options & pe_print_log) == 0) { status_print("\n"); } if (options & pe_print_brief) { print_rscs_brief(rsc->children, child_text, options, print_data, TRUE); } else { for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (options & pe_print_html) { status_print("<li>\n"); } child_rsc->fns->print(child_rsc, child_text, options, print_data); if (options & pe_print_html) { status_print("</li>\n"); } } } if (options & pe_print_html) { status_print("</ul>\n"); } free(child_text); }
\n"); } free(child_text); } PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *") int pe__group_xml(pcmk__output_t *out, va_list args) { unsigned int show_opts = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GList *gIter = rsc->children; char *count = pcmk__itoa(g_list_length(gIter)); int rc = pcmk_rc_no_output; gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { free(count); return rc; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } if (rc == pcmk_rc_no_output) { rc = pe__name_and_nvpairs_xml(out, true, "group", 4 , "id", rsc->id , "number_resources", count , "managed", pe__rsc_bool_str(rsc, pe_rsc_managed) , "disabled", pcmk__btoa(pe__resource_is_disabled(rsc))); free(count); CRM_ASSERT(rc == pcmk_rc_ok); } out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, only_rsc); } if (rc == pcmk_rc_ok) { pcmk__output_xml_pop_parent(out); } return rc; } PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GList *", "GList *") int pe__group_default(pcmk__output_t *out, va_list args) { unsigned int show_opts = va_arg(args, unsigned int); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); int rc = pcmk_rc_no_output; - gboolean parent_passes = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)); + gboolean active = rsc->fns->active(rsc, TRUE); + gboolean partially_active = rsc->fns->active(rsc, FALSE); + if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } if (pcmk_is_set(show_opts, pcmk_show_brief)) { GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); if (rscs != NULL) { - out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s", rsc->id, + char *s = NULL; + + if (!active && partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { + int n_active = inactive_resources(rsc); + + if (n_active > 0) { + s = crm_strdup_printf(" (%d member%s inactive)", n_active, pcmk__plural_s(n_active)); + } + } + + out->begin_list(out, NULL, NULL, "Resource Group: %s%s%s%s", rsc->id, + s ? s : "", pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)", pe__resource_is_disabled(rsc) ? " (disabled)" : ""); pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs); rc = pcmk_rc_ok; g_list_free(rscs); + + if (s) { + free(s); + } } } else { for (GList *gIter = rsc->children; gIter; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; + char *s = NULL; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } - PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s", rsc->id, + if (!active && partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { + int n_active = inactive_resources(rsc); + + if (n_active > 0) { + s = crm_strdup_printf(" (%d member%s inactive)", n_active, pcmk__plural_s(n_active)); + } + } + + + PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s%s%s%s", rsc->id, + s ? 
s : "", pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)", pe__resource_is_disabled(rsc) ? " (disabled)" : ""); out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, only_rsc); + + if (s) { + free(s); + } } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } void group_free(pe_resource_t * rsc) { CRM_CHECK(rsc != NULL, return); pe_rsc_trace(rsc, "Freeing %s", rsc->id); for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; CRM_ASSERT(child_rsc); pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); child_rsc->fns->free(child_rsc); } pe_rsc_trace(rsc, "Freeing child list"); g_list_free(rsc->children); common_free(rsc); } enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current) { enum rsc_role_e group_role = RSC_ROLE_UNKNOWN; GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; enum rsc_role_e role = child_rsc->fns->state(child_rsc, current); if (role > group_role) { group_role = role; } } pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role)); return group_role; } gboolean pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)), pcmk__str_none)) { passes = TRUE; } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc), pcmk__str_none)) { passes = TRUE; } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id, pcmk__str_none)) { passes = TRUE; } else { for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; }