diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 83b5f083c4..3fb04bdf22 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,5071 +1,5071 @@
=#=#=#= Begin test: Basic output =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic output - OK (0) =#=#=#=
* Passed: crm_mon - Basic output
=#=#=#= Begin test: Basic output (XML) =#=#=#=
=#=#=#= End test: Basic output (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Basic output (XML)
=#=#=#= Begin test: Output without node section =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]
=#=#=#= End test: Output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Output without node section
=#=#=#= Begin test: Output without node section (XML) =#=#=#=
=#=#=#= End test: Output without node section (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output without node section (XML)
=#=#=#= Begin test: Output with only the node section =#=#=#=
Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
=#=#=#= End test: Output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1:
    * httpd: migration-threshold=1000000:
      * (1) start

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster02
    * ping (ocf:pacemaker:ping): Started cluster01
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-0
      * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster01
      * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
    * Replica[1]
      * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster02
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-1
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
      * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster02
    * Replica[2]
      * httpd-bundle-ip-192.168.122.133 (ocf:heartbeat:IPaddr2): Stopped
      * httpd (ocf:heartbeat:apache): Stopped
      * httpd-bundle-docker-2 (ocf:heartbeat:docker): Stopped
      * httpd-bundle-2 (ocf:pacemaker:remote): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:0:
      * mysql-proxy (lsb:mysql-proxy): Started cluster02
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (test_description)
    * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (test_description)
    * promotable-rsc (ocf:pacemaker:Stateful): Stopped (test_description)
    * promotable-rsc (ocf:pacemaker:Stateful): Stopped (test_description)
    * promotable-rsc (ocf:pacemaker:Stateful): Stopped (test_description)

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: cluster01 (1):
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0@cluster01:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1@cluster02:
    * httpd: migration-threshold=1000000:
      * (1) start

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * 1 (ocf:pacemaker:Dummy): Active cluster02
  * 1 (stonith:fence_xvm): Active cluster01
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * 1/1 (lsb:exim): Active cluster02
    * 1/1 (ocf:heartbeat:IPaddr): Active cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1:
    * httpd: migration-threshold=1000000:
      * (1) start

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01: online:
    * Resources:
      * ping (ocf:pacemaker:ping): Started
      * Fencing (stonith:fence_xvm): Started
      * mysql-proxy (lsb:mysql-proxy): Started
      * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted (test_description)
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started
      * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started
  * Node cluster02: online:
    * Resources:
      * ping (ocf:pacemaker:ping): Started
      * dummy (ocf:pacemaker:Dummy): Started
      * Public-IP (ocf:heartbeat:IPaddr): Started
      * Email (lsb:exim): Started
      * mysql-proxy (lsb:mysql-proxy): Started
      * promotable-rsc (ocf:pacemaker:Stateful): Promoted (test_description)
      * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started
  * GuestNode httpd-bundle-0: online:
    * Resources:
      * httpd (ocf:heartbeat:apache): Started
  * GuestNode httpd-bundle-1: online:
    * Resources:
      * httpd (ocf:heartbeat:apache): Started
  * GuestNode httpd-bundle-2: OFFLINE:
    * Resources:

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1:
    * httpd: migration-threshold=1000000:
      * (1) start

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01: online:
    * Resources:
      * 1 (lsb:mysql-proxy): Active
      * 1 (ocf:heartbeat:IPaddr2): Active
      * 1 (ocf:heartbeat:docker): Active
      * 1 (ocf:pacemaker:Stateful): Active
      * 1 (ocf:pacemaker:ping): Active
      * 1 (ocf:pacemaker:remote): Active
      * 1 (stonith:fence_xvm): Active
  * Node cluster02: online:
    * Resources:
      * 1 (lsb:exim): Active
      * 1 (lsb:mysql-proxy): Active
      * 1 (ocf:heartbeat:IPaddr): Active
      * 1 (ocf:heartbeat:IPaddr2): Active
      * 1 (ocf:heartbeat:docker): Active
      * 1 (ocf:pacemaker:Dummy): Active
      * 1 (ocf:pacemaker:Stateful): Active
      * 1 (ocf:pacemaker:ping): Active
      * 1 (ocf:pacemaker:remote): Active
  * GuestNode httpd-bundle-0: online:
    * Resources:
      * 1 (ocf:heartbeat:apache): Active
  * GuestNode httpd-bundle-1: online:
    * Resources:
      * 1 (ocf:heartbeat:apache): Active

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1:
    * httpd: migration-threshold=1000000:
      * (1) start

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: Output grouped by node (XML) =#=#=#=
=#=#=#= End test: Output grouped by node (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output grouped by node (XML)
=#=#=#= Begin test: Complete output filtered by node =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Unpromoted: [ cluster01 ]

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000

Operations:
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * dummy: migration-threshold=1000000:
      * (16) stop
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (2) start
      * (4) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by node
=#=#=#= Begin test: Complete output filtered by node (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by node (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by node (XML)
=#=#=#= Begin test: Complete output filtered by tag =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster02 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster02 ]
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]

Node Attributes:
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
    * dummy: migration-threshold=1000000:
      * (18) start
      * (19) monitor: interval="60000ms"
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * promotable-rsc: migration-threshold=1000000:
      * (4) monitor: interval="10000ms"
      * (5) cancel: interval="10000ms"
      * (6) promote
      * (7) monitor: interval="5000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"

Negative Location Constraints:
  * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by tag
=#=#=#= Begin test: Complete output filtered by tag (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by tag (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by tag (XML)
=#=#=#= Begin test: Complete output filtered by resource tag =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Fencing (stonith:fence_xvm): Started cluster01

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster01:
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
=#=#=#= End test: Complete output filtered by resource tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by resource tag
=#=#=#= Begin test: Complete output filtered by resource tag (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by resource tag (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by resource tag (XML)
=#=#=#= Begin test: Output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Active Resources:
  * No active resources
=#=#=#= End test: Output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by node that doesn't exist
=#=#=#= Begin test: Output filtered by node that doesn't exist (XML) =#=#=#=
=#=#=#= End test: Output filtered by node that doesn't exist (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by node that doesn't exist (XML)
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
    * Stopped (disabled): [ cluster01 cluster02 ]
  * Resource Group: inactive-group (disabled):
    * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
    * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster02 ]

Full List of Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster02 ]
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
    * Stopped (disabled): [ cluster02 ]
  * Resource Group: inactive-group (disabled):
    * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
    * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
=#=#=#= Begin test: Complete output filtered by primitive resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Fencing (stonith:fence_xvm): Started cluster01

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster01:
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
=#=#=#= End test: Complete output filtered by primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by primitive resource
=#=#=#= Begin test: Complete output filtered by primitive resource (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by primitive resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by primitive resource (XML)
=#=#=#= Begin test: Complete output filtered by group resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * Public-IP: migration-threshold=1000000:
      * (2) start
    * Email: migration-threshold=1000000:
      * (2) start
=#=#=#= End test: Complete output filtered by group resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by group resource
=#=#=#= Begin test: Complete output filtered by group resource (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by group resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by group resource (XML)
=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * Public-IP: migration-threshold=1000000:
      * (2) start
=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by group resource member
=#=#=#= Begin test: Output filtered by group resource member (XML) =#=#=#=
=#=#=#= End test: Output filtered by group resource member (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by group resource member (XML)
=#=#=#= Begin test: Complete output filtered by clone resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
=#=#=#= End test: Complete output filtered by clone resource - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by clone resource
=#=#=#= Begin test: Complete output filtered by clone resource (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by clone resource (XML)
=#=#=#= Begin test: Complete output filtered by clone resource instance =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]

Node Attributes:
  * Node: cluster01:
    * location : office
    * pingd : 1000
  * Node: cluster02:
    * pingd : 1000

Operations:
  * Node: cluster02:
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
  * Node: cluster01:
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
=#=#=#= End test: Complete output filtered by clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by clone resource instance
=#=#=#= Begin test: Complete output filtered by clone resource instance (XML) =#=#=#=
=#=#=#= End test: Complete output filtered by clone resource instance (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Complete output filtered by clone resource instance (XML)
=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster02

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * ping: migration-threshold=1000000:
      * (11) start
      * (12) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * ping: migration-threshold=1000000:
      * (17) start
      * (18) monitor: interval="10000ms"
=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by exact clone resource instance
=#=#=#= Begin test: Output filtered by exact clone resource instance (XML) =#=#=#=
=#=#=#= End test: Output filtered by exact clone resource instance (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by exact clone resource instance (XML)
=#=#=#= Begin test: Output filtered by resource that doesn't exist =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * No active resources
=#=#=#= End test: Output filtered by resource that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by resource that doesn't exist
=#=#=#= Begin test: Output filtered by resource that doesn't exist (XML) =#=#=#=
=#=#=#= End test: Output filtered by resource that doesn't exist (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by resource that doesn't exist (XML)
=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
    * Stopped (disabled): [ cluster01 cluster02 ]
  * Resource Group: inactive-group (disabled):
    * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
    * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
=#=#=#= Begin test: Output filtered by inactive bundle resource (XML) =#=#=#=
=#=#=#= End test: Output filtered by inactive bundle resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by inactive bundle resource (XML)
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
=#=#=#= Begin test: Output filtered by bundled IP address resource (XML) =#=#=#=
=#=#=#= End test: Output filtered by bundled IP address resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by bundled IP address resource (XML)
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[1]
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
=#=#=#= Begin test: Output filtered by bundled container (XML) =#=#=#=
=#=#=#= End test: Output filtered by bundled container (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by bundled container (XML)
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01
=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
=#=#=#= Begin test: Output filtered by bundle connection (XML) =#=#=#=
=#=#=#= End test: Output filtered by bundle connection (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by bundle connection (XML)
=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-0
    * Replica[1]
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-1
    * Replica[2]
      * httpd (ocf:heartbeat:apache): Stopped
=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
=#=#=#= Begin test: Output filtered by bundled primitive resource (XML) =#=#=#=
=#=#=#= End test: Output filtered by bundled primitive resource (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output filtered by bundled primitive resource (XML)
=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:0:
      * mysql-proxy (lsb:mysql-proxy): Started cluster02
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
=#=#=#= Begin test: Output, filtered by clone name in cloned group (XML) =#=#=#=
=#=#=#= End test: Output, filtered by clone name in cloned group (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output, filtered by clone name in cloned group (XML)
=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:0:
      * mysql-proxy (lsb:mysql-proxy): Started cluster02
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by group name in cloned group
=#=#=#= Begin test: Output, filtered by group name in cloned group (XML) =#=#=#=
=#=#=#= End test: Output, filtered by group name in cloned group (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output, filtered by group name in cloned group (XML)
=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
=#=#=#= Begin test: Output, filtered by exact group instance name in cloned group (XML) =#=#=#=
=#=#=#= End test: Output, filtered by exact group instance name in cloned group (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output, filtered by exact group instance name in cloned group (XML)
=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:0:
      * mysql-proxy (lsb:mysql-proxy): Started cluster02
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
=#=#=#= Begin test: Output, filtered by primitive name in cloned group (XML) =#=#=#=
=#=#=#= End test: Output, filtered by primitive name in cloned group (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output, filtered by primitive name in cloned group (XML)
=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster01: online
  * GuestNode httpd-bundle-1@cluster02: online
  * GuestNode httpd-bundle-2@: OFFLINE

Active Resources:
  * Clone Set: mysql-clone-group [mysql-group]:
    * Resource Group: mysql-group:1:
      * mysql-proxy (lsb:mysql-proxy): Started cluster01

Node Attributes:
  * Node: cluster01 (1):
    * location : office
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
  * Node: cluster01 (1):
    * mysql-proxy: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
=#=#=#= Begin test: Output, filtered by exact primitive instance name in cloned group (XML) =#=#=#=
=#=#=#= End test: Output, filtered by exact primitive instance name in cloned group (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output, filtered by exact primitive instance name in cloned group (XML)
=#=#=#= Begin test: Check that CIB_file="-" works =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Clone Set: ping-clone [ping]:
    * Started: [ cluster01 cluster02 ]
  * Fencing (stonith:fence_xvm): Started cluster01
  * dummy (ocf:pacemaker:Dummy): Started cluster02
  * Container bundle set: httpd-bundle [pcmk:http]:
    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
  * Resource Group: exim-group:
    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02
    * Email (lsb:exim): Started cluster02
  * Clone Set: mysql-clone-group [mysql-group]:
    * Started: [ cluster01 cluster02 ]
  * Clone Set: promotable-clone [promotable-rsc] (promotable):
    * Promoted: [ cluster02 ]
    * Unpromoted: [ cluster01 ]
=#=#=#= End test: Check that CIB_file="-" works - OK (0) =#=#=#=
* Passed: crm_mon - Check that CIB_file="-" works
=#=#=#= Begin test: Output of partially active resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster02: online
  * GuestNode httpd-bundle-1@cluster01: online

Active Resources:
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster01
    * ping (ocf:pacemaker:ping): Stopped (not installed)
  * Fencing (stonith:fence_xvm): Started cluster01
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-0
      * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
      * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
    * Replica[1]
      * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
      * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
      * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
  * Resource Group: partially-active-group (2 members inactive):
    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
    * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02

Failed Resource Actions:
-  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
+  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='Done', queued=0ms, exec=33ms
=#=#=#= End test: Output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - Output of partially active resources
=#=#=#= Begin test: Output of partially active resources (XML) =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
-
+
=#=#=#= End test: Output of partially active resources (XML) - OK (0) =#=#=#=
* Passed: crm_mon - Output of partially active resources (XML)
=#=#=#= Begin test: Output of partially active resources, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster02: online
  * GuestNode httpd-bundle-1@cluster01: online

Full List of Resources:
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster01
    * ping (ocf:pacemaker:ping): Stopped (not installed)
  * Fencing (stonith:fence_xvm): Started cluster01
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-0
      * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
      * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
    * Replica[1]
      * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
      * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
      * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
  * Resource Group: partially-active-group:
    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
    * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
    * dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled)
    * dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed)
  * smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed)

Failed Resource Actions:
-  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
+  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='Done', queued=0ms, exec=33ms
=#=#=#= End test: Output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Output of partially active resources, with inactive resources
=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster02: online
  * GuestNode httpd-bundle-1@cluster01: online

Full List of Resources:
  * 0/1 (ocf:pacemaker:HealthSMART): Active
  * 1/1 (stonith:fence_xvm): Active cluster01
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster01
    * ping (ocf:pacemaker:ping): Stopped (not installed)
  * Container bundle set: httpd-bundle [pcmk:http]:
    * Replica[0]
      * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02
      * httpd (ocf:heartbeat:apache): Started httpd-bundle-0
      * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02
      * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02
    * Replica[1]
      * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01
      * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1
      * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01
      * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01
  * Resource Group: partially-active-group:
    * 2/4 (ocf:pacemaker:Dummy): Active cluster02

Node Attributes:
  * Node: cluster01 (1):
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
    * dummy-1: migration-threshold=1000000:
      * (2) start
    * dummy-2: migration-threshold=1000000:
      * (2) probe
    * dummy-4: migration-threshold=1000000:
      * (2) probe
    * smart-mon: migration-threshold=1000000:
      * (9) probe
    * ping: migration-threshold=1000000:
      * (6) probe
  * Node: cluster01 (1):
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * ping: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0@cluster02:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1@cluster01:
    * httpd: migration-threshold=1000000:
      * (1) probe

Failed Resource Actions:
-  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
+  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='Done', queued=0ms, exec=33ms
=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output, with inactive resources
=#=#=#= Begin test: Text output of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Resource Group: partially-active-group (2 members inactive):
    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
    * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
=#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active group
=#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Full List of Resources:
  * Resource Group: partially-active-group:
    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
    * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02
    * dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled)
    * dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed)
=#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active group, with inactive resources
=#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Online: [ cluster01 cluster02 ]
  * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]

Active Resources:
  * Resource Group: partially-active-group (2 members inactive):
    * dummy-1 (ocf:pacemaker:Dummy): Started cluster02
=#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of active member of partially active group
=#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1
  * Node cluster02 (2): online, feature set <3.15.1
  * GuestNode httpd-bundle-0@cluster02: online
  * GuestNode httpd-bundle-1@cluster01: online

Active Resources:
  * Resource Group: partially-active-group (2 members inactive):
    * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02

Failed Resource Actions:
-  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms
+  * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='Done', queued=0ms, exec=33ms
=#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
* Passed: crm_mon - Text output of inactive member of partially active group
=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0
unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (2) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 4 nodes configured
  * 16 resource instances configured (1 DISABLED)

Node List:
  * Node cluster01 (1): online, feature set <3.15.1:
    * Resources:
      * 1 (ocf:heartbeat:IPaddr2): Active
      * 1 (ocf:heartbeat:docker): Active
      * 1 (ocf:pacemaker:ping): Active
      * 1 (ocf:pacemaker:remote): Active
      * 1 (stonith:fence_xvm): Active
  * Node cluster02 (2): online, feature set <3.15.1:
    * Resources:
      * 1 (ocf:heartbeat:IPaddr2): Active
      * 1 (ocf:heartbeat:docker): Active
      * 2 (ocf:pacemaker:Dummy): Active
      * 1 (ocf:pacemaker:remote): Active
  * GuestNode httpd-bundle-0@cluster02: online:
    * Resources:
      * 1 (ocf:heartbeat:apache): Active
  * GuestNode httpd-bundle-1@cluster01: online:
    * Resources:
      * 1 (ocf:heartbeat:apache): Active

Inactive Resources:
  * Clone Set: ping-clone [ping]:
    * ping (ocf:pacemaker:ping): Started cluster01
    * ping (ocf:pacemaker:ping): Stopped (not installed)
  * Resource Group: partially-active-group:
    * 2/4 (ocf:pacemaker:Dummy): Active cluster02
  * smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed)

Node Attributes:
  * Node: cluster01 (1):
    * pingd : 1000
  * Node: cluster02 (2):
    * pingd : 1000

Operations:
  * Node: cluster02 (2):
    * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-0: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
    * dummy-1: migration-threshold=1000000:
      * (2) start
    * dummy-2: migration-threshold=1000000:
      * (2) probe
    * dummy-4: migration-threshold=1000000:
      * (2) probe
    * smart-mon: migration-threshold=1000000:
      * (9) probe
    * ping: migration-threshold=1000000:
      * (6) probe
  * Node: cluster01 (1):
    * Fencing: migration-threshold=1000000:
      * (15) start
      * (20) monitor: interval="60000ms"
    * ping: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="10000ms"
    * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-docker-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="60000ms"
    * httpd-bundle-1: migration-threshold=1000000:
      * (2) start
      * (3) monitor: interval="30000ms"
  * Node: httpd-bundle-0@cluster02:
    * httpd: migration-threshold=1000000:
      * (1) start
  * Node: httpd-bundle-1@cluster01:
    * httpd: migration-threshold=1000000:
      *
(1) probe Failed Resource Actions: - * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms + * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='Done', queued=0ms, exec=33ms =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node, with inactive resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED cluster01 * smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed) =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Begin test: Output of partially active resources, filtered by node (XML) =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature) | dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter) | httpd_last_failure_0 =#=#=#= End test: Output of partially active resources, filtered by node (XML) - OK (0) =#=#=#= * Passed: crm_mon - Output of partially active resources, filtered by node (XML) =#=#=#= Begin test: Output of active unmanaged resource on offline node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 (maintenance) * rsc1 (ocf:pacemaker:Dummy): Started cluster01 (maintenance) * rsc2 (ocf:pacemaker:Dummy): Started cluster02 (maintenance) =#=#=#= End test: Output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - Output of active unmanaged resource on offline node =#=#=#= Begin test: Output of active unmanaged resource on offline node (XML) =#=#=#= =#=#=#= End test: Output of active unmanaged resource on offline node (XML) - OK (0) =#=#=#= * Passed: crm_mon - Output of active unmanaged resource on offline node (XML) =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster 
will not attempt to start, stop or recover services Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster01 * 1 (ocf:pacemaker:Dummy): Active cluster02 * 1 (stonith:fence_xvm): Active cluster01 =#=#=#= End test: Brief text output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - Brief text output of active unmanaged resource on offline node =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Node cluster01: online: * Resources: * 1 (ocf:pacemaker:Dummy): Active * 1 (stonith:fence_xvm): Active * Node cluster02: OFFLINE: * Resources: * 1 (ocf:pacemaker:Dummy): Active =#=#=#= End test: Brief text output of active unmanaged resource on offline node, grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#= Begin test: Output of all resources with maintenance-mode enabled =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * GuestNode httpd-bundle-0: maintenance * GuestNode httpd-bundle-1: maintenance * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping] (maintenance): * ping (ocf:pacemaker:ping): Started cluster02 (maintenance) * ping (ocf:pacemaker:ping): Started cluster01 (maintenance) * Fencing (stonith:fence_xvm): Started cluster01 (maintenance) * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance) * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled, maintenance): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) * Container bundle set: httpd-bundle [pcmk:http] (maintenance): * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (maintenance) * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance) * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (maintenance) * Resource Group: exim-group (maintenance): * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance) * Email (lsb:exim): Started cluster02 (maintenance) * Clone Set: mysql-clone-group [mysql-group] (maintenance): * Resource Group: mysql-group:0 (maintenance): * mysql-proxy (lsb:mysql-proxy): Started cluster02 (maintenance) * Resource Group: mysql-group:1 (maintenance): * mysql-proxy (lsb:mysql-proxy): Started cluster01 (maintenance) * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (maintenance) =#=#=#= End test: Output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance-mode enabled =#=#=#= 
Begin test: Output of all resources with maintenance-mode enabled (XML) =#=#=#= =#=#=#= End test: Output of all resources with maintenance-mode enabled (XML) - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance-mode enabled (XML) =#=#=#= Begin test: Output of all resources with maintenance enabled for a node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster02: maintenance * GuestNode httpd-bundle-1: maintenance * Online: [ cluster01 ] * GuestOnline: [ httpd-bundle-0 ] Full List of Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster02 (maintenance) * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance) * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance) * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance) * Email (lsb:exim): Started cluster02 (maintenance) * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 (maintenance) * Started: [ cluster01 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) * Unpromoted: [ cluster01 ] =#=#=#= End test: Output of all resources with maintenance enabled for a node - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance enabled for a node =#=#=#= Begin test: Output of all resources with maintenance enabled for a node (XML) =#=#=#= =#=#=#= End test: Output of all resources with maintenance enabled for a node (XML) - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance enabled for a node (XML) =#=#=#= Begin test: Output of all resources with maintenance meta attribute true =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * GuestNode httpd-bundle-0: maintenance * GuestNode httpd-bundle-1: maintenance * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping] (maintenance): * ping (ocf:pacemaker:ping): Started cluster02 (maintenance) * ping (ocf:pacemaker:ping): Started cluster01 (maintenance) * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance) * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled, maintenance): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) * Container bundle set: httpd-bundle [pcmk:http] (maintenance): * httpd-bundle-0 
(192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (maintenance) * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance) * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (maintenance) * Resource Group: exim-group (maintenance): * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance) * Email (lsb:exim): Started cluster02 (maintenance) * Clone Set: mysql-clone-group [mysql-group] (maintenance): * Resource Group: mysql-group:0 (maintenance): * mysql-proxy (lsb:mysql-proxy): Started cluster02 (maintenance) * Resource Group: mysql-group:1 (maintenance): * mysql-proxy (lsb:mysql-proxy): Started cluster01 (maintenance) * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (maintenance) =#=#=#= End test: Output of all resources with maintenance meta attribute true - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance meta attribute true =#=#=#= Begin test: Output of all resources with maintenance meta attribute true (XML) =#=#=#= =#=#=#= End test: Output of all resources with maintenance meta attribute true (XML) - OK (0) =#=#=#= * Passed: crm_mon - Output of all resources with maintenance meta attribute true (XML) =#=#=#= Begin test: Text output of guest node's container on different node from its remote resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cent7-host2 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 10 resource instances configured Node List: * Online: [ cent7-host1 cent7-host2 ] * GuestOnline: [ httpd-bundle1-0 httpd-bundle2-0 ] Active Resources: * Resource Group: group1: * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1 * Resource Group: group2: * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2 * Container bundle: httpd-bundle1 [pcmktest:http]: * httpd-bundle1-0 (192.168.20.188) (ocf:heartbeat:apache): Started cent7-host1 * Container bundle: httpd-bundle2 [pcmktest:http]: * httpd-bundle2-0 (192.168.20.190) (ocf:heartbeat:apache): Started cent7-host2 =#=#=#= End test: Text output of guest node's container on different node from its remote resource - OK (0) =#=#=#= * Passed: crm_mon - Text output of guest node's container on different node from its remote resource =#=#=#= Begin test: Complete text output of guest node's container on different node from its remote resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cent7-host2 (3232262829) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 10 resource instances configured Node List: * Node cent7-host1 (3232262828): online, feature set <3.15.1 * Node cent7-host2 (3232262829): online, feature set <3.15.1 * GuestNode httpd-bundle1-0@cent7-host1: online * GuestNode httpd-bundle2-0@cent7-host2: online Active Resources: * Resource Group: group1: * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1 * Resource Group: group2: * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2 * Container bundle: httpd-bundle1 [pcmktest:http]: * httpd-bundle1-ip-192.168.20.188 (ocf:heartbeat:IPaddr2): Started cent7-host1 * httpd1 (ocf:heartbeat:apache): Started httpd-bundle1-0 * httpd-bundle1-docker-0 (ocf:heartbeat:docker): Started cent7-host1 * httpd-bundle1-0 (ocf:pacemaker:remote): Started cent7-host2 * Container bundle: httpd-bundle2 [pcmktest:http]: * httpd-bundle2-ip-192.168.20.190 
(ocf:heartbeat:IPaddr2): Started cent7-host2 * httpd2 (ocf:heartbeat:apache): Started httpd-bundle2-0 * httpd-bundle2-docker-0 (ocf:heartbeat:docker): Started cent7-host2 * httpd-bundle2-0 (ocf:pacemaker:remote): Started cent7-host2 =#=#=#= End test: Complete text output of guest node's container on different node from its remote resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output of guest node's container on different node from its remote resource diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp index d9e473b21c..9859fe316d 100644 --- a/cts/cli/regression.crm_resource.exp +++ b/cts/cli/regression.crm_resource.exp @@ -1,4049 +1,4049 @@ =#=#=#= Begin test: crm_resource run with extra arguments =#=#=#= crm_resource: non-option ARGV-elements: [1 of 2] foo [2 of 2] bar =#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource run with extra arguments =#=#=#= Begin test: List all available resource options (invalid type) =#=#=#= crm_resource: Error parsing option --list-options =#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#= * Passed: crm_resource - List all available resource options (invalid type) =#=#=#= Begin test: List all available resource options (invalid type) =#=#=#= crm_resource: Error parsing option --list-options =#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#= * Passed: crm_resource - List all available resource options (invalid type) =#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#= Primitive meta-attributes Meta-attributes applicable to primitive resources * priority: Resource assignment priority * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. * Possible values: score (default: ) * critical: Default value for influence in colocation constraints * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. * Possible values: boolean (default: ) * target-role: State the cluster should attempt to keep this resource in * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted" * is-managed: Whether the cluster is allowed to actively change the resource's state * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. * Possible values: boolean (default: ) * maintenance: If true, the cluster will not schedule any actions involving the resource * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. 
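For context, the primitive meta-attributes documented in this listing are configured in the CIB as nvpair entries inside a resource's meta_attributes block. A minimal sketch, assuming a hypothetical Dummy resource (ids and values are illustrative, not taken from these fixtures):

    <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
      <meta_attributes id="dummy-meta_attributes">
        <!-- illustrative: is-managed=false stops the cluster from starting/stopping the resource -->
        <nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
        <!-- illustrative: maintenance=true additionally pauses recurring monitors -->
        <nvpair id="dummy-meta_attributes-maintenance" name="maintenance" value="true"/>
      </meta_attributes>
    </primitive>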
* Possible values: boolean (default: ) * resource-stickiness: Score to add to the current node when a resource is already active * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. * Possible values: score (no default) * requires: Conditions under which the resource can be started * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". * Possible values: "nothing", "quorum", "fencing", "unfencing" * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there. * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. * Possible values: score (default: ) * failure-timeout: Number of seconds before acting as if a failure had not occurred * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. * Possible values: duration (default: ) * multiple-active: What to do if the cluster finds the resource active on more than one node * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected" * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved * Whether the cluster should try to "live migrate" this resource when it needs to be moved. 
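The failure-handling attributes above (migration-threshold, failure-timeout, multiple-active) combine in the same meta_attributes block; a sketch under assumed values (three failures tolerated per node, failures expiring after two minutes, extra active instances blocked):

    <meta_attributes id="dummy-meta_attributes">
      <nvpair id="dummy-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
      <nvpair id="dummy-meta_attributes-failure-timeout" name="failure-timeout" value="120s"/>
      <!-- "block" marks the resource unmanaged if found active on more than one node -->
      <nvpair id="dummy-meta_attributes-multiple-active" name="multiple-active" value="block"/>
    </meta_attributes>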
The default is true for ocf:pacemaker:remote resources, and false otherwise. * Possible values: boolean (no default) * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it * Possible values: boolean (default: ) * container-attribute-target: Where to check user-defined node attributes * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). * Possible values: string (no default) * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. * Possible values: string (no default) * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. * Possible values: string (no default) * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. * Possible values: port (default: ) * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. * Possible values: timeout (default: ) * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). * Possible values: boolean (default: ) =#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#= * Passed: crm_resource - List non-advanced primitive meta-attributes =#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) =#=#=#= 1.1 Meta-attributes applicable to primitive resources Primitive meta-attributes If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. Resource assignment priority Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. Default value for influence in colocation constraints "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". 
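The remote-* meta-attributes described above turn an ordinary resource into a guest node; a hypothetical sketch (the VirtualDomain agent, the vm1/guest1 names, the config path, and the address are assumptions for illustration):

    <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
      <instance_attributes id="vm1-params">
        <nvpair id="vm1-params-config" name="config" value="/etc/libvirt/qemu/vm1.xml"/>
      </instance_attributes>
      <meta_attributes id="vm1-meta">
        <!-- enables vm1 as guest node "guest1"; must not clash with any resource or node ID -->
        <nvpair id="vm1-meta-remote-node" name="remote-node" value="guest1"/>
        <nvpair id="vm1-meta-remote-addr" name="remote-addr" value="192.168.122.50"/>
        <nvpair id="vm1-meta-remote-connect-timeout" name="remote-connect-timeout" value="60s"/>
      </meta_attributes>
    </primitive>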
State the cluster should attempt to keep this resource in If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. Whether the cluster is allowed to actively change the resource's state If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. If true, the cluster will not schedule any actions involving the resource Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. Score to add to the current node when a resource is already active Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". Conditions under which the resource can be started Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. Number of failures on a node before the resource becomes ineligible to run there. Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. Number of seconds before acting as if a failure had not occurred What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. 
Note that any resources ordered after this one will still need to be restarted.) What to do if the cluster finds the resource active on more than one node Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. Whether the cluster should try to "live migrate" this resource when it needs to be moved Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). Where to check user-defined node attributes Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. Name of the Pacemaker Remote guest node this resource is associated with, if any If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. If remote-node is specified, port on the guest used for its Pacemaker Remote connection If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). =#=#=#= End test: List non-advanced primitive meta-attributes (XML) - OK (0) =#=#=#= * Passed: crm_resource - List non-advanced primitive meta-attributes (XML) =#=#=#= Begin test: List all available primitive meta-attributes =#=#=#= Primitive meta-attributes Meta-attributes applicable to primitive resources * priority: Resource assignment priority * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. * Possible values: score (default: ) * critical: Default value for influence in colocation constraints * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. 
* Possible values: boolean (default: ) * target-role: State the cluster should attempt to keep this resource in * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted" * is-managed: Whether the cluster is allowed to actively change the resource's state * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. * Possible values: boolean (default: ) * maintenance: If true, the cluster will not schedule any actions involving the resource * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. * Possible values: boolean (default: ) * resource-stickiness: Score to add to the current node when a resource is already active * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. * Possible values: score (no default) * requires: Conditions under which the resource can be started * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". * Possible values: "nothing", "quorum", "fencing", "unfencing" * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there. * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. 
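requires and resource-stickiness from this listing take the same nvpair form; an illustrative sketch (values assumed):

    <meta_attributes id="dummy-meta_attributes">
      <!-- start only after failed/unknown nodes have been fenced -->
      <nvpair id="dummy-meta_attributes-requires" name="requires" value="fencing"/>
      <!-- prefer staying on the current node by a score of 100 -->
      <nvpair id="dummy-meta_attributes-resource-stickiness" name="resource-stickiness" value="100"/>
    </meta_attributes>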
* Possible values: score (default: ) * failure-timeout: Number of seconds before acting as if a failure had not occurred * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. * Possible values: duration (default: ) * multiple-active: What to do if the cluster finds the resource active on more than one node * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected" * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. * Possible values: boolean (no default) * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it * Possible values: boolean (default: ) * container-attribute-target: Where to check user-defined node attributes * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). * Possible values: string (no default) * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. * Possible values: string (no default) * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. * Possible values: string (no default) * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. 
The Pacemaker Remote daemon on the guest must be configured to listen on this port. * Possible values: port (default: ) * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. * Possible values: timeout (default: ) * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). * Possible values: boolean (default: ) =#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#= * Passed: crm_resource - List all available primitive meta-attributes =#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#= 1.1 Meta-attributes applicable to primitive resources Primitive meta-attributes If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. Resource assignment priority Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. Default value for influence in colocation constraints "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". State the cluster should attempt to keep this resource in If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. Whether the cluster is allowed to actively change the resource's state If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. If true, the cluster will not schedule any actions involving the resource Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. Score to add to the current node when a resource is already active Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". 
Conditions under which the resource can be started Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. Number of failures on a node before the resource becomes ineligible to run there. Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. Number of seconds before acting as if a failure had not occurred What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) What to do if the cluster finds the resource active on more than one node Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. Whether the cluster should try to "live migrate" this resource when it needs to be moved Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). Where to check user-defined node attributes Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. Name of the Pacemaker Remote guest node this resource is associated with, if any If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. 
If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. If remote-node is specified, port on the guest used for its Pacemaker Remote connection If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). =#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#= * Passed: crm_resource - List all available primitive meta-attributes (XML) =#=#=#= Begin test: List non-advanced fencing parameters =#=#=#= Fencing resource common parameters Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names. * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. * Possible values: string (no default) * pcmk_host_list: Nodes targeted by this device * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. * Possible values: string (no default) * pcmk_host_check: How to determine which nodes can be targeted by the device * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" * Possible values: "dynamic-list", "static-list", "status", "none" * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions. * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. * Possible values: duration (default: ) * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value. * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. 
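The common fencing parameters documented here are instance attributes of a stonith-class resource; a minimal sketch in the fence_xvm style used elsewhere in these fixtures (the port map and delay values are illustrative assumptions):

    <primitive id="Fencing" class="stonith" type="fence_xvm">
      <instance_attributes id="Fencing-params">
        <!-- port 1 fences cluster01, port 2 fences cluster02 -->
        <nvpair id="Fencing-params-pcmk_host_map" name="pcmk_host_map" value="cluster01:1;cluster02:2"/>
        <!-- static per-target delay to help avoid fencing death matches -->
        <nvpair id="Fencing-params-pcmk_delay_base" name="pcmk_delay_base" value="cluster01:1s;cluster02:5s"/>
      </instance_attributes>
    </primitive>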
* Possible values: string (default: ) * pcmk_action_limit: The maximum number of actions can be performed in parallel on this device * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited. * Possible values: integer (default: ) =#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#= * Passed: crm_resource - List non-advanced fencing parameters =#=#=#= Begin test: List non-advanced fencing parameters (XML) =#=#=#= 1.1 Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. Fencing resource common parameters If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters. Name of agent parameter that should be set to the fencing target For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. A mapping of node names to port numbers for devices that do not support node names. Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. Nodes targeted by this device Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" How to determine which nodes can be targeted by the device Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. Enable a delay of no more than the time specified before executing fencing actions. This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. Enable a base delay for fencing actions and specify base delay value. If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited. The maximum number of actions can be performed in parallel on this device Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action. An alternate command to run instead of 'reboot' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. 
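The per-action overrides described in this listing (alternate command, timeout, and retry count for 'reboot') go in the same instance_attributes block as the common parameters; a sketch with assumed values:

    <instance_attributes id="Fencing-params">
      <!-- run 'off' instead of 'reboot' on devices without a reboot command -->
      <nvpair id="Fencing-params-pcmk_reboot_action" name="pcmk_reboot_action" value="off"/>
      <nvpair id="Fencing-params-pcmk_reboot_timeout" name="pcmk_reboot_timeout" value="120s"/>
      <nvpair id="Fencing-params-pcmk_reboot_retries" name="pcmk_reboot_retries" value="3"/>
    </instance_attributes>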
Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up. The maximum number of times to try the 'reboot' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. An alternate command to run instead of 'off' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. Specify an alternate timeout to use for 'off' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up. The maximum number of times to try the 'off' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. An alternate command to run instead of 'on' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions. Specify an alternate timeout to use for 'on' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up. The maximum number of times to try the 'on' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action. An alternate command to run instead of 'list' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. Specify an alternate timeout to use for 'list' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. The maximum number of times to try the 'list' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action. An alternate command to run instead of 'monitor' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task.
In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. The maximum number of times to try the 'monitor' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action. An alternate command to run instead of 'status' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. Specify an alternate timeout to use for 'status' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. The maximum number of times to try the 'status' command within the timeout period =#=#=#= End test: List non-advanced fencing parameters (XML) - OK (0) =#=#=#= * Passed: crm_resource - List non-advanced fencing parameters (XML) =#=#=#= Begin test: List all available fencing parameters =#=#=#= Fencing resource common parameters Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names. * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. * Possible values: string (no default) * pcmk_host_list: Nodes targeted by this device * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. * Possible values: string (no default) * pcmk_host_check: How to determine which nodes can be targeted by the device * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" * Possible values: "dynamic-list", "static-list", "status", "none" * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions. * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. * Possible values: duration (default: ) * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value. * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. 
This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. * Possible values: string (default: ) * pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited. * Possible values: integer (default: ) * ADVANCED OPTIONS: * pcmk_host_argument: Name of agent parameter that should be set to the fencing target * If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters. * Possible values: string (no default) * pcmk_reboot_action: An alternate command to run instead of 'reboot' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action. * Possible values: string (default: ) * pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. * Possible values: timeout (default: ) * pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up. * Possible values: integer (default: ) * pcmk_off_action: An alternate command to run instead of 'off' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. * Possible values: string (default: ) * pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. * Possible values: timeout (default: ) * pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up. * Possible values: integer (default: ) * pcmk_on_action: An alternate command to run instead of 'on' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. * Possible values: string (default: ) * pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* Possible values: timeout (default: ) * pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up. * Possible values: integer (default: ) * pcmk_list_action: An alternate command to run instead of 'list' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action. * Possible values: string (default: ) * pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. * Possible values: timeout (default: ) * pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. * Possible values: integer (default: ) * pcmk_monitor_action: An alternate command to run instead of 'monitor' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action. * Possible values: string (default: ) * pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. * Possible values: timeout (default: ) * pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. * Possible values: integer (default: ) * pcmk_status_action: An alternate command to run instead of 'status' * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action. * Possible values: string (default: ) * pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. * Possible values: timeout (default: ) * pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
* Possible values: integer (default: ) =#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#= * Passed: crm_resource - List all available fencing parameters =#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#= 1.1 Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. Fencing resource common parameters If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters. Name of agent parameter that should be set to the fencing target For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. A mapping of node names to port numbers for devices that do not support node names. Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. Nodes targeted by this device Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" How to determine which nodes can be targeted by the device Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. Enable a delay of no more than the time specified before executing fencing actions. This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. Enable a base delay for fencing actions and specify base delay value. If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited. The maximum number of actions that can be performed in parallel on this device Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action. An alternate command to run instead of 'reboot' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
The maximum number of times to try the 'reboot' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. An alternate command to run instead of 'off' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. Specify an alternate timeout to use for 'off' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up. The maximum number of times to try the 'off' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. An alternate command to run instead of 'on' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions. Specify an alternate timeout to use for 'on' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up. The maximum number of times to try the 'on' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action. An alternate command to run instead of 'list' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. Specify an alternate timeout to use for 'list' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. The maximum number of times to try the 'list' command within the timeout period Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action. An alternate command to run instead of 'monitor' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. The maximum number of times to try the 'monitor' command within the timeout period Some devices do not support the standard commands or may provide additional ones.
Use this to specify an alternate, device-specific, command that implements the 'status' action. An alternate command to run instead of 'status' Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. Specify an alternate timeout to use for 'status' actions instead of stonith-timeout Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. The maximum number of times to try the 'status' command within the timeout period =#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#= * Passed: crm_resource - List all available fencing parameters (XML) =#=#=#= Begin test: Create a resource =#=#=#= =#=#=#= Current cib after: Create a resource =#=#=#= =#=#=#= End test: Create a resource - OK (0) =#=#=#= * Passed: cibadmin - Create a resource =#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#= crm_resource: --resource cannot be used with --class, --agent, and --provider =#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given both -r and resource config =#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#= crm_resource: --class, --agent, and --provider can only be used with --validate and --force-* =#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given resource config with invalid action =#=#=#= Begin test: Create a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Query a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity false =#=#=#= Current cib after: Query a resource meta attribute =#=#=#= =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Query a resource meta attribute =#=#=#= Begin test: Remove a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#= =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource 
- Remove a resource meta attribute =#=#=#= Begin test: Create another resource meta attribute (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Create another resource meta attribute (XML) - OK (0) =#=#=#= * Passed: crm_resource - Create another resource meta attribute (XML) =#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running (XML) =#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#= * Passed: crm_resource - Remove another resource meta attribute (XML) =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element (XML) =#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#= =#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= Current cib after: Get a non-existent attribute from a resource element (XML) =#=#=#= =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element (XML) =#=#=#= Begin test: Get an existent 
attribute from a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity ocf =#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#= =#=#=#= End test: Get an existent attribute from a resource element - OK (0) =#=#=#= * Passed: crm_resource - Get an existent attribute from a resource element =#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a non-existent attribute for a resource element (XML) =#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set an existent attribute for a resource element (XML) =#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Delete an existent attribute for a resource element (XML) =#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Delete a non-existent attribute for a resource element (XML) =#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set attribute: name=description 
value=test_description =#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#= =#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Set a non-existent attribute for a resource element =#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set attribute: name=description value=test_description =#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#= =#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Set an existent attribute for a resource element =#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted attribute: description =#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#= =#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Delete an existent attribute for a resource element =#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted attribute: description =#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#= =#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Delete a non-existent attribute for a resource element =#=#=#= Begin test: Create a resource attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s =#=#=#= Current cib after: Create a resource attribute =#=#=#= =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource attribute =#=#=#= Begin test: List the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= Current cib after: List the configured resources =#=#=#= =#=#=#= End test: List the configured resources - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources =#=#=#= Begin test: List the configured resources (XML) =#=#=#= unpack_resources error: 
Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: List the configured resources (XML) =#=#=#= =#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources (XML) =#=#=#= Begin test: Implicitly list the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#= * Passed: crm_resource - Implicitly list the configured resources =#=#=#= Begin test: List IDs of instantiated resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#= * Passed: crm_resource - List IDs of instantiated resources =#=#=#= Begin test: Show XML configuration of resource =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy (ocf:pacemaker:Dummy): Stopped Resource XML: =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource =#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity ]]> =#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource (XML) =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Resource 'dummy' not moved: active in 0 locations. To prevent 'dummy' from running on a specific location, specify a node. 
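The fencing parameter listings earlier in this section document options (pcmk_host_map, pcmk_host_check, pcmk_delay_base, pcmk_delay_max) that are configured as instance attributes of a stonith-class resource. As a sketch only, using the example values from those descriptions: the agent fence_true is the fixture's dummy agent, all IDs and values here are illustrative, and this is not the elided XML from the "Create a fencing resource" test below.

    # Hypothetical fencing device using the documented example values.
    # pcmk_host_check="static-list" is satisfied because pcmk_host_map is set.
    cibadmin --create --scope resources --xml-text '
      <primitive id="Fence" class="stonith" type="fence_true">
        <instance_attributes id="Fence-params">
          <nvpair id="Fence-host-map"   name="pcmk_host_map"   value="node1:1;node2:2,3"/>
          <nvpair id="Fence-host-check" name="pcmk_host_check" value="static-list"/>
          <nvpair id="Fence-delay-base" name="pcmk_delay_base" value="node1:1s;node2:5"/>
          <nvpair id="Fence-delay-max"  name="pcmk_delay_max"  value="10s"/>
        </instance_attributes>
      </primitive>'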
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#= =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#= * Passed: crm_resource - Require a destination when migrating a resource that is stopped =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Node 'i.do.not.exist' not found Error performing operation: No such object =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#= =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#= * Passed: crm_resource - Don't support migration to non-existent locations =#=#=#= Begin test: Create a fencing resource =#=#=#= =#=#=#= Current cib after: Create a fencing resource =#=#=#= =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#= * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped * Fence (stonith:fence_true): Stopped Transition Summary: * Start dummy ( node1 ) * Start Fence ( node1 ) Executing Cluster Transition: * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node1 * Resource action: dummy start on node1 * Resource action: Fence start on node1 Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= crm_resource: Error performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= * Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. 
This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= * Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= * Passed: crm_resource - Clear out constraints generated by --move =#=#=#= Begin test: Ban a resource on unknown node =#=#=#= crm_resource: Node 'host1' not found Error performing operation: No such object =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#= * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: * Move Fence ( node1 -> node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 * Resource action: Fence start on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#= * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. 
This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 =#=#=#= Begin test: Show where a resource is running =#=#=#= resource dummy is running on: node1 =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= * Passed: crm_resource - Show where a resource is running =#=#=#= Begin test: Show constraints on a resource =#=#=#= Locations: * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy) =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= * Passed: crm_resource - Show constraints on a resource =#=#=#= Begin test: Ban dummy from node2 (XML) =#=#=#= =#=#=#= Current cib after: Ban dummy from node2 (XML) =#=#=#= =#=#=#= End test: Ban dummy from node2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node2 (XML) =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: * Resource action: dummy stop on node1 * Resource action: dummy start on node3 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#= * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 (XML) =#=#=#= =#=#=#= Current cib after: Move dummy to node1 (XML) =#=#=#= =#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Move dummy to node1 (XML) =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#= * Passed: crm_resource - Clear implicit constraints for dummy on node2 =#=#=#= Begin test: Drop the status section =#=#=#= =#=#=#= End test: Drop the status section - OK (0) =#=#=#= * Passed: cibadmin - Drop the status section =#=#=#= Begin test: Create a clone =#=#=#= =#=#=#= End test: Create a clone - OK (0) =#=#=#= * Passed: cibadmin - Create a clone =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update resource meta attribute with 
duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: false (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates (force clone) =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update child resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#= Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute in parent =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update existing resource meta attribute =#=#=#= A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed 
value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Update existing resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the parent =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#= * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Delete resource child meta attribute =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= =#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Create the dummy-group resource group =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy1 =#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= =#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Delete the dummy-group resource group =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= =#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#= * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= 
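The move, ban, clear, and lifetime tests in this stretch exercise crm_resource's constraint-management options. A minimal sketch of the kind of invocations involved, assuming current crm_resource long options; the resource and node names match the tests, while the PT20S duration is illustrative:

    # --move creates a cli-prefer-dummy constraint; --ban creates
    # cli-ban-dummy-on-node1; --lifetime takes an ISO 8601 duration.
    crm_resource --resource dummy --move --node node1
    crm_resource --resource dummy --ban --node node1 --lifetime PT20S

    # Remove the generated cli-* constraints; with --expired, only
    # constraints whose lifetime rule has already passed are removed.
    crm_resource --resource dummy --clear --expired
    crm_resource --resource dummy --clear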
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= * Passed: crm_resource - Try to move a resource previously moved with a lifetime =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= Migration will take effect until: WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= =#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 for a short time =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#= * Passed: sleep - Remove expired constraints =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= =#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#= * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= =#=#=#= End test: Set a node health strategy - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= =#=#=#= End test: Set a node health attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health attribute =#=#=#= Begin test: Show why a resource is not running on an unhealthy node (XML) =#=#=#= =#=#=#= End test: Show why a resource is not running on an unhealthy node (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running on an unhealthy node (XML) =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= =#=#=#= End test: Delete a resource - OK (0) =#=#=#= * Passed: crm_resource - Delete a resource =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 =#=#=#= Begin test: Check locations and constraints for prim1 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 =#=#=#= Begin test: Recursively check locations and constraints for prim1 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 (XML) =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, 
id=colocation-prim2-prim3-INFINITY) =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 =#=#=#= Begin test: Check locations and constraints for prim2 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 =#=#=#= Begin test: Recursively check locations and constraints for prim2 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 (XML) =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 =#=#=#= Begin test: Check locations and constraints for prim3 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim3 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 =#=#=#= Begin test: Recursively check locations and constraints for prim3 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim3 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 (XML) =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End 
test: Check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 =#=#=#= Begin test: Check locations and constraints for prim4 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim4 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 =#=#=#= Begin test: Recursively check locations and constraints for prim4 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim4 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 (XML) =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 =#=#=#= Begin test: Check locations and constraints for prim5 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim5 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 =#=#=#= Begin test: Recursively check locations and constraints for prim5 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim5 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 (XML) =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 =#=#=#= Begin test: Check locations and constraints for prim6 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim6 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 
(XML) =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 =#=#=#= Begin test: Recursively check locations and constraints for prim6 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim6 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 (XML) =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 =#=#=#= Begin test: Check locations and constraints for prim7 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim7 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 =#=#=#= Begin test: Recursively check locations and constraints for prim7 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim7 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 (XML) =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 =#=#=#= Begin test: Check locations and constraints for prim8 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim8 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 =#=#=#= Begin test: Recursively check locations and constraints for prim8 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim8 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 (XML) =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 =#=#=#= Begin test: Check locations and constraints for prim9 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim9 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= Resources prim9 is 
colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 =#=#=#= Begin test: Recursively check locations and constraints for prim9 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim9 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 (XML) =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 =#=#=#= Begin test: Check locations and constraints for prim10 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim10 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 =#=#=#= Begin test: Recursively check locations and constraints for prim10 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim10 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 (XML) =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 =#=#=#= Begin test: Check locations and constraints for prim11 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim11 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (id=colocation-prim11-prim12-INFINITY - loop) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (id=colocation-prim13-prim11-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 =#=#=#= Begin test: Recursively check locations and constraints for prim11 (XML) =#=#=#= =#=#=#= End test: Recursively check 
locations and constraints for prim11 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 (XML) =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 =#=#=#= Begin test: Check locations and constraints for prim12 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim12 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (id=colocation-prim12-prim13-INFINITY - loop) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (id=colocation-prim11-prim12-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 =#=#=#= Begin test: Recursively check locations and constraints for prim12 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim12 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 (XML) =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 =#=#=#= Begin test: Check locations and constraints for prim13 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim13 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (id=colocation-prim13-prim11-INFINITY - loop) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (id=colocation-prim12-prim13-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 =#=#=#= Begin test: Recursively check locations and constraints for prim13 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim13 (XML) - 
OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 (XML) =#=#=#= Begin test: Check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group =#=#=#= Begin test: Check locations and constraints for group (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for group (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group (XML) =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group =#=#=#= Begin test: Recursively check locations and constraints for group (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for group (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group (XML) =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone =#=#=#= Begin test: Check locations and constraints for clone (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for clone (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone (XML) =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone =#=#=#= Begin test: Recursively check locations and constraints for clone (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for clone (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone (XML) =#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (referring to group) =#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#= Resources colocated with gr2: * prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (without referring to group) =#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#= =#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#= Begin test: Set a meta-attribute for 
group and resource colocated with it =#=#=#= Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped =#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for group and resource colocated with it =#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#= =#=#=#= End test: Set a meta-attribute for clone and resource colocated with it (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#= Begin test: Show resource digests (XML) =#=#=#= =#=#=#= End test: Show resource digests (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests (XML) =#=#=#= Begin test: Show resource digests with overrides =#=#=#= =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests with overrides =#=#=#= Begin test: Show resource operations =#=#=#= -rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): complete -Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): complete -rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): complete -Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): complete -Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): complete -rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): pending -Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): complete -rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): complete -rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): complete -Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): complete -Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): complete +rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): Done +Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): Done +rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): Done +Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): Done +Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): Done +rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): Pending +Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): Done +rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): Done +rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): Done +Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): Done +Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): Done =#=#=#= End test: Show resource operations - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations =#=#=#= 
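The - / + pairs in the "Show resource operations" test above track the renamed execution-status labels ("complete" becomes "Done", "pending" becomes "Pending") produced by pcmk_exec_status_str() in include/crm/common/results.h, which is changed near the end of this diff. A minimal sketch of that mapping, not part of the patch itself, assuming Pacemaker development headers and glib are available (pcmk_exec_status_str() is a static inline, so no library linkage is needed):

#include <stdio.h>
#include <crm/common/results.h>    // static inline pcmk_exec_status_str()

int main(void)
{
    // With this patch applied, these print the new labels that the
    // "Show resource operations" expected output above now contains.
    printf("%s\n", pcmk_exec_status_str(PCMK_EXEC_DONE));     // "Done"
    printf("%s\n", pcmk_exec_status_str(PCMK_EXEC_PENDING));  // "Pending"
    return 0;
}

Compile with something like: gcc example.c $(pkg-config --cflags glib-2.0).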
Begin test: Show resource operations (XML) =#=#=#= - - - - - - - - - - - + + + + + + + + + + + =#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations (XML) =#=#=#= Begin test: List a promotable clone resource =#=#=#= resource promotable-clone is running on: cluster01 resource promotable-clone is running on: cluster02 Promoted =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource =#=#=#= Begin test: List a promotable clone resource (XML) =#=#=#= cluster01 cluster02 =#=#=#= End test: List a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource (XML) =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= resource promotable-rsc is running on: cluster01 resource promotable-rsc is running on: cluster02 Promoted =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource =#=#=#= Begin test: List the primitive of a promotable clone resource (XML) =#=#=#= cluster01 cluster02 =#=#=#= End test: List the primitive of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource (XML) =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= resource promotable-rsc:0 is running on: cluster02 Promoted =#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource =#=#=#= Begin test: List a single instance of a promotable clone resource (XML) =#=#=#= cluster02 =#=#=#= End test: List a single instance of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource (XML) =#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= resource promotable-rsc:1 is running on: cluster01 =#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource =#=#=#= Begin test: List another instance of a promotable clone resource (XML) =#=#=#= cluster01 =#=#=#= End test: List another instance of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource (XML) =#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#= crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0' Error performing operation: Invalid parameter =#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#= * Passed: crm_resource - Try to move an instance of a cloned resource =#=#=#= Begin test: Check that CIB_file="-" works - crm_resource (XML) =#=#=#= =#=#=#= End test: Check that CIB_file="-" works - crm_resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check that CIB_file="-" works - crm_resource (XML) diff --git a/include/crm/common/results.h b/include/crm/common/results.h index 2869cb6f3c..2fedb7c736 100644 --- a/include/crm/common/results.h +++ b/include/crm/common/results.h @@ -1,403 +1,403 @@ /* * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_RESULTS__H #define PCMK__CRM_COMMON_RESULTS__H #include <glib.h> // gboolean #ifdef __cplusplus extern "C" { #endif /*! * \file * \brief Function and executable result codes * \ingroup core */ // Lifted from config.h /* The _Noreturn keyword of C11. */ #ifndef _Noreturn # if (defined __cplusplus \ && ((201103 <= __cplusplus && !(__GNUC__ == 4 && __GNUC_MINOR__ == 7)) \ || (defined _MSC_VER && 1900 <= _MSC_VER))) # define _Noreturn [[noreturn]] # elif ((!defined __cplusplus || defined __clang__) \ && (201112 <= (defined __STDC_VERSION__ ? __STDC_VERSION__ : 0) \ || 4 < __GNUC__ + (7 <= __GNUC_MINOR__))) /* _Noreturn works as-is. */ # elif 2 < __GNUC__ + (8 <= __GNUC_MINOR__) || 0x5110 <= __SUNPRO_C # define _Noreturn __attribute__ ((__noreturn__)) # elif 1200 <= (defined _MSC_VER ? _MSC_VER : 0) # define _Noreturn __declspec (noreturn) # else # define _Noreturn # endif #endif /* * Function return codes * * Most Pacemaker API functions return an integer return code. There are two * alternative interpretations. The legacy interpretation is that the absolute * value of the return code is either a system error number or a custom * pcmk_err_* number. This is less than ideal because system error numbers are * constrained only to the positive int range, so there's the possibility that * system errors and custom errors could collide (which did in fact happen * already on one architecture). The new interpretation is that negative values * are from the pcmk_rc_e enum, and positive values are system error numbers. * Both use 0 for success. * * For system error codes, see: * - /usr/include/asm-generic/errno.h * - /usr/include/asm-generic/errno-base.h */ // Legacy custom return codes for Pacemaker API functions (deprecated) // NOTE: sbd (as of at least 1.5.2) uses this #define pcmk_ok 0 #define PCMK_ERROR_OFFSET 190 /* Replacements on non-linux systems, see include/portability.h */ #define PCMK_CUSTOM_OFFSET 200 /* Purely custom codes */ #define pcmk_err_generic 201 #define pcmk_err_no_quorum 202 #define pcmk_err_schema_validation 203 #define pcmk_err_transform_failed 204 #define pcmk_err_old_data 205 // NOTE: sbd (as of at least 1.5.2) uses this #define pcmk_err_diff_failed 206 // NOTE: sbd (as of at least 1.5.2) uses this #define pcmk_err_diff_resync 207 #define pcmk_err_cib_modified 208 #define pcmk_err_cib_backup 209 #define pcmk_err_cib_save 210 #define pcmk_err_schema_unchanged 211 #define pcmk_err_cib_corrupt 212 #define pcmk_err_multiple 213 #define pcmk_err_node_unknown 214 #define pcmk_err_already 215 /* On HPPA 215 is ENOSYM (Unknown error 215), which hopefully never happens. */ #ifdef __hppa__ #define pcmk_err_bad_nvpair 250 /* 216 is ENOTSOCK */ #define pcmk_err_unknown_format 252 /* 217 is EDESTADDRREQ */ #else #define pcmk_err_bad_nvpair 216 #define pcmk_err_unknown_format 217 #endif /*! * \enum pcmk_rc_e * \brief Return codes for Pacemaker API functions * * Any Pacemaker API function documented as returning a "standard Pacemaker * return code" will return pcmk_rc_ok (0) on success, and one of this * enumeration's other (negative) values or a (positive) system error number * otherwise. The custom codes are at -1001 and lower, so that the caller may * use -1 through -1000 for their own custom values if desired.
While generally * referred to as "errors", nonzero values simply indicate a result, which might * or might not be an error depending on the calling context. */ enum pcmk_rc_e { /* When adding new values, use consecutively lower numbers, update the array * in lib/common/results.c, and test with crm_error. */ pcmk_rc_compression = -1039, pcmk_rc_ns_resolution = -1038, pcmk_rc_no_transaction = -1037, pcmk_rc_bad_xml_patch = -1036, pcmk_rc_bad_input = -1035, pcmk_rc_disabled = -1034, pcmk_rc_duplicate_id = -1033, pcmk_rc_unpack_error = -1032, pcmk_rc_invalid_transition = -1031, pcmk_rc_graph_error = -1030, pcmk_rc_dot_error = -1029, pcmk_rc_underflow = -1028, pcmk_rc_no_input = -1027, pcmk_rc_no_output = -1026, pcmk_rc_after_range = -1025, pcmk_rc_within_range = -1024, pcmk_rc_before_range = -1023, pcmk_rc_undetermined = -1022, pcmk_rc_op_unsatisfied = -1021, pcmk_rc_ipc_pid_only = -1020, pcmk_rc_ipc_unresponsive = -1019, pcmk_rc_ipc_unauthorized = -1018, pcmk_rc_no_quorum = -1017, pcmk_rc_schema_validation = -1016, pcmk_rc_schema_unchanged = -1015, pcmk_rc_transform_failed = -1014, pcmk_rc_old_data = -1013, pcmk_rc_diff_failed = -1012, pcmk_rc_diff_resync = -1011, pcmk_rc_cib_modified = -1010, pcmk_rc_cib_backup = -1009, pcmk_rc_cib_save = -1008, pcmk_rc_cib_corrupt = -1007, pcmk_rc_multiple = -1006, pcmk_rc_node_unknown = -1005, pcmk_rc_already = -1004, pcmk_rc_bad_nvpair = -1003, pcmk_rc_unknown_format = -1002, // Developers: Use a more specific code than pcmk_rc_error whenever possible pcmk_rc_error = -1001, // Values -1 through -1000 reserved for caller use // NOTE: sbd (as of at least 1.5.2) uses this pcmk_rc_ok = 0 // Positive values reserved for system error numbers }; /*! * \enum ocf_exitcode * \brief Exit status codes for resource agents * * The OCF Resource Agent API standard enumerates the possible exit status codes * that agents should return. Besides being used with OCF agents, these values * are also used by the executor as a universal status for all agent standards; * actual results are mapped to these before returning them to clients. */ enum ocf_exitcode { PCMK_OCF_OK = 0, //!< Success // NOTE: booth (as of at least 1.1) uses this value PCMK_OCF_UNKNOWN_ERROR = 1, //!< Unspecified error PCMK_OCF_INVALID_PARAM = 2, //!< Parameter invalid (in local context) PCMK_OCF_UNIMPLEMENT_FEATURE = 3, //!< Requested action not implemented PCMK_OCF_INSUFFICIENT_PRIV = 4, //!< Insufficient privileges PCMK_OCF_NOT_INSTALLED = 5, //!< Dependencies not available locally PCMK_OCF_NOT_CONFIGURED = 6, //!< Parameter invalid (inherently) // NOTE: booth (as of at least 1.1) uses this value PCMK_OCF_NOT_RUNNING = 7, //!< Service safely stopped PCMK_OCF_RUNNING_PROMOTED = 8, //!< Service active and promoted PCMK_OCF_FAILED_PROMOTED = 9, //!< Service failed and possibly in promoted role PCMK_OCF_DEGRADED = 190, //!< Service active but more likely to fail soon PCMK_OCF_DEGRADED_PROMOTED = 191, //!< Service promoted but more likely to fail soon /* These two are Pacemaker extensions, not in the OCF standard. The * controller records PCMK_OCF_UNKNOWN for pending actions. * PCMK_OCF_CONNECTION_DIED is used only with older DCs that don't support * PCMK_EXEC_NOT_CONNECTED. */ PCMK_OCF_CONNECTION_DIED = 189, //!< \deprecated See PCMK_EXEC_NOT_CONNECTED PCMK_OCF_UNKNOWN = 193, //!< Action is pending }; // NOTE: sbd (as of at least 1.5.2) uses this /*! * \enum crm_exit_e * \brief Exit status codes for tools and daemons * * We want well-specified (i.e. 
OS-invariant) exit status codes for our daemons * and applications so they can be relied on by callers. (Function return codes * and errno's do not make good exit statuses.) * * The only hard rule is that exit statuses must be between 0 and 255; all else * is convention. Universally, 0 is success, and 1 is generic error (excluding * OSes we don't support -- for example, OpenVMS considers 1 success!). * * For init scripts, the LSB gives meaning to 0-7, and sets aside 150-199 for * application use. OCF adds 8-9 and 190-191. * * sysexits.h was an attempt to give additional meanings, but never really * caught on. It uses 0 and 64-78. * * Bash reserves 2 ("incorrect builtin usage") and 126-255 (126 is "command * found but not executable", 127 is "command not found", 128 + n is * "interrupted by signal n"). * * tldp.org recommends 64-113 for application use. * * We try to overlap with the above conventions when practical. */ typedef enum crm_exit_e { // Common convention CRM_EX_OK = 0, //!< Success CRM_EX_ERROR = 1, //!< Unspecified error // LSB + OCF CRM_EX_INVALID_PARAM = 2, //!< Parameter invalid (in local context) CRM_EX_UNIMPLEMENT_FEATURE = 3, //!< Requested action not implemented CRM_EX_INSUFFICIENT_PRIV = 4, //!< Insufficient privileges CRM_EX_NOT_INSTALLED = 5, //!< Dependencies not available locally CRM_EX_NOT_CONFIGURED = 6, //!< Parameter invalid (inherently) CRM_EX_NOT_RUNNING = 7, //!< Service safely stopped CRM_EX_PROMOTED = 8, //!< Service active and promoted CRM_EX_FAILED_PROMOTED = 9, //!< Service failed and possibly promoted // sysexits.h CRM_EX_USAGE = 64, //!< Command line usage error CRM_EX_DATAERR = 65, //!< User-supplied data incorrect CRM_EX_NOINPUT = 66, //!< Input file not available CRM_EX_NOUSER = 67, //!< User does not exist CRM_EX_NOHOST = 68, //!< Host unknown CRM_EX_UNAVAILABLE = 69, //!< Needed service unavailable CRM_EX_SOFTWARE = 70, //!< Internal software bug CRM_EX_OSERR = 71, //!< External (OS/environmental) problem CRM_EX_OSFILE = 72, //!< System file not usable CRM_EX_CANTCREAT = 73, //!< File couldn't be created CRM_EX_IOERR = 74, //!< File I/O error CRM_EX_TEMPFAIL = 75, //!< Try again CRM_EX_PROTOCOL = 76, //!< Protocol violated CRM_EX_NOPERM = 77, //!< Non-file permission issue CRM_EX_CONFIG = 78, //!< Misconfiguration // Custom CRM_EX_FATAL = 100, //!< Do not respawn CRM_EX_PANIC = 101, //!< Panic the local host CRM_EX_DISCONNECT = 102, //!< Lost connection to something CRM_EX_OLD = 103, //!< Update older than existing config CRM_EX_DIGEST = 104, //!< Digest comparison failed CRM_EX_NOSUCH = 105, //!< Requested item does not exist CRM_EX_QUORUM = 106, //!< Local partition does not have quorum CRM_EX_UNSAFE = 107, //!< Requires --force or new conditions CRM_EX_EXISTS = 108, //!< Requested item already exists CRM_EX_MULTIPLE = 109, //!< Requested item has multiple matches CRM_EX_EXPIRED = 110, //!< Requested item has expired CRM_EX_NOT_YET_IN_EFFECT = 111, //!< Requested item is not in effect CRM_EX_INDETERMINATE = 112, //!< Could not determine status CRM_EX_UNSATISFIED = 113, //!< Requested item does not satisfy constraints // Other CRM_EX_TIMEOUT = 124, //!< Convention from timeout(1) /* Anything above 128 overlaps with some shells' use of these values for * "interrupted by signal N", and so may be unreliable when detected by * shell scripts. 
*/ // OCF Resource Agent API 1.1 CRM_EX_DEGRADED = 190, //!< Service active but more likely to fail soon CRM_EX_DEGRADED_PROMOTED = 191, //!< Service promoted but more likely to fail soon /* Custom * * This can be used to initialize exit status variables or to indicate that * a command is pending (which is what the controller uses it for). */ CRM_EX_NONE = 193, //!< No exit status available CRM_EX_MAX = 255, //!< Ensure crm_exit_t can hold this } crm_exit_t; /*! * \enum pcmk_exec_status * \brief Execution status * * These codes are used to specify the result of the attempt to execute an * agent, rather than the agent's result itself. */ enum pcmk_exec_status { PCMK_EXEC_UNKNOWN = -2, //!< Used only to initialize variables PCMK_EXEC_PENDING = -1, //!< Action is in progress PCMK_EXEC_DONE, //!< Action completed, result is known PCMK_EXEC_CANCELLED, //!< Action was cancelled PCMK_EXEC_TIMEOUT, //!< Action did not complete in time PCMK_EXEC_NOT_SUPPORTED, //!< Agent does not implement requested action PCMK_EXEC_ERROR, //!< Execution failed, may be retried PCMK_EXEC_ERROR_HARD, //!< Execution failed, do not retry on node PCMK_EXEC_ERROR_FATAL, //!< Execution failed, do not retry anywhere PCMK_EXEC_NOT_INSTALLED, //!< Agent or dependency not available locally PCMK_EXEC_NOT_CONNECTED, //!< No connection to executor PCMK_EXEC_INVALID, //!< Action cannot be attempted (e.g. shutdown) PCMK_EXEC_NO_FENCE_DEVICE, //!< No fence device is configured for target PCMK_EXEC_NO_SECRETS, //!< Necessary CIB secrets are unavailable // Add new values above here then update this one below PCMK_EXEC_MAX = PCMK_EXEC_NO_SECRETS, //!< Maximum value for this enum }; /*! * \enum pcmk_result_type * \brief Types of Pacemaker result codes * * A particular integer can have different meanings within different Pacemaker * result code families. It may be interpretable within zero, one, or multiple * families. * * These values are useful for specifying how an integer result code should be * interpreted in situations involving a generic integer value. For example, a * function that can process multiple types of result codes might accept an * arbitrary integer argument along with a \p pcmk_result_type argument that * specifies how to interpret the integer. 
*/ enum pcmk_result_type { pcmk_result_legacy = 0, //!< Legacy API function return code pcmk_result_rc = 1, //!< Standard Pacemaker return code pcmk_result_exitcode = 2, //!< Exit status code pcmk_result_exec_status = 3, //!< Execution status }; int pcmk_result_get_strings(int code, enum pcmk_result_type type, const char **name, const char **desc); const char *pcmk_rc_name(int rc); // NOTE: sbd (as of at least 1.5.2) uses this const char *pcmk_rc_str(int rc); crm_exit_t pcmk_rc2exitc(int rc); enum ocf_exitcode pcmk_rc2ocf(int rc); int pcmk_rc2legacy(int rc); int pcmk_legacy2rc(int legacy_rc); // NOTE: sbd (as of at least 1.5.2) uses this const char *pcmk_strerror(int rc); const char *pcmk_errorname(int rc); const char *crm_exit_name(crm_exit_t exit_code); // NOTE: sbd (as of at least 1.5.2) uses this const char *crm_exit_str(crm_exit_t exit_code); _Noreturn crm_exit_t crm_exit(crm_exit_t rc); /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *condition, gboolean do_core, gboolean do_fork); static inline const char * pcmk_exec_status_str(enum pcmk_exec_status status) { switch (status) { - case PCMK_EXEC_PENDING: return "pending"; - case PCMK_EXEC_DONE: return "complete"; + case PCMK_EXEC_PENDING: return "Pending"; + case PCMK_EXEC_DONE: return "Done"; case PCMK_EXEC_CANCELLED: return "Cancelled"; - case PCMK_EXEC_TIMEOUT: return "Timed Out"; - case PCMK_EXEC_NOT_SUPPORTED: return "NOT SUPPORTED"; + case PCMK_EXEC_TIMEOUT: return "Timed out"; + case PCMK_EXEC_NOT_SUPPORTED: return "Unsupported"; case PCMK_EXEC_ERROR: return "Error"; case PCMK_EXEC_ERROR_HARD: return "Hard error"; case PCMK_EXEC_ERROR_FATAL: return "Fatal error"; case PCMK_EXEC_NOT_INSTALLED: return "Not installed"; case PCMK_EXEC_NOT_CONNECTED: return "Internal communication failure"; case PCMK_EXEC_INVALID: return "Cannot execute now"; case PCMK_EXEC_NO_FENCE_DEVICE: return "No fence device"; case PCMK_EXEC_NO_SECRETS: return "CIB secrets unavailable"; - default: return "UNKNOWN!"; + default: return "Unrecognized status (bug?)"; } } #ifdef __cplusplus } #endif #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) #include <crm/common/results_compat.h> #endif
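For reference, a hedged sketch of how the return-code families declared above fit together: a legacy code (whose absolute value is an errno or a pcmk_err_* constant) is normalized to the standard pcmk_rc_e family, then mapped to a well-specified exit status. This is illustrative only, not part of the patch, and assumes libcrmcommon is linked (for example, -lcrmcommon):

#include <stdio.h>
#include <crm/common/results.h>

int main(void)
{
    /* Legacy callers often returned negated codes; pcmk_legacy2rc() is
     * assumed here to normalize the sign, per the "absolute value"
     * convention documented earlier in this header.
     */
    int rc = pcmk_legacy2rc(-pcmk_err_schema_validation);

    printf("standard rc: %s (%s)\n", pcmk_rc_name(rc), pcmk_rc_str(rc));

    /* Standard codes map onto OS-invariant exit statuses for tools. */
    crm_exit_t ec = pcmk_rc2exitc(rc);
    printf("exit status: %s (%s)\n", crm_exit_name(ec), crm_exit_str(ec));
    return 0;
}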